/*
 * Functions related to segment and merge handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

static struct bio *blk_bio_discard_split(struct request_queue *q,
					 struct bio *bio,
					 struct bio_set *bs)
{
	unsigned int max_discard_sectors, granularity;
	int alignment;
	sector_t tmp;
	unsigned split_sectors;

	/* Zero-sector (unknown) and one-sector granularities are the same.  */
	granularity = max(q->limits.discard_granularity >> 9, 1U);

	max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
	max_discard_sectors -= max_discard_sectors % granularity;

	if (unlikely(!max_discard_sectors)) {
		/* XXX: warn */
		return NULL;
	}

	if (bio_sectors(bio) <= max_discard_sectors)
		return NULL;

	split_sectors = max_discard_sectors;

	/*
	 * If the next starting sector would be misaligned, stop the discard at
	 * the previous aligned sector.
	 */
	alignment = (q->limits.discard_alignment >> 9) % granularity;

	tmp = bio->bi_iter.bi_sector + split_sectors - alignment;
	tmp = sector_div(tmp, granularity);

	if (split_sectors > tmp)
		split_sectors -= tmp;

	return bio_split(bio, split_sectors, GFP_NOIO, bs);
}
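/*
 * Worked example (editor's illustration, not from the original source),
 * assuming q->limits.max_discard_sectors = 65535, a 1 MiB discard
 * granularity (2048 sectors), zero discard_alignment, and a large discard
 * bio starting at sector 1000:
 *
 *   max_discard_sectors = 65535 - (65535 % 2048) = 63488
 *   alignment           = 0
 *   tmp                 = 1000 + 63488 - 0 = 64488
 *   sector_div(tmp, 2048) leaves the remainder, 1000, in tmp
 *   split_sectors       = 63488 - 1000 = 62488
 *
 * The split therefore ends at sector 1000 + 62488 = 63488, a multiple of
 * the 2048-sector granularity, so the remainder of the discard resumes on
 * an aligned boundary.
 */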

static struct bio *blk_bio_write_same_split(struct request_queue *q,
					    struct bio *bio,
					    struct bio_set *bs)
{
	if (!q->limits.max_write_same_sectors)
		return NULL;

	if (bio_sectors(bio) <= q->limits.max_write_same_sectors)
		return NULL;

	return bio_split(bio, q->limits.max_write_same_sectors, GFP_NOIO, bs);
}

static struct bio *blk_bio_segment_split(struct request_queue *q,
					 struct bio *bio,
					 struct bio_set *bs)
{
	struct bio *split;
	struct bio_vec bv, bvprv;
	struct bvec_iter iter;
	unsigned seg_size = 0, nsegs = 0, sectors = 0;
	int prev = 0;

	bio_for_each_segment(bv, bio, iter) {
		sectors += bv.bv_len >> 9;

		if (sectors > queue_max_sectors(q))
			goto split;

		/*
		 * If the queue doesn't support SG gaps and adding this
		 * offset would create a gap, disallow it.
		 */
		if (prev && bvec_gap_to_prev(q, &bvprv, bv.bv_offset))
			goto split;

		if (prev && blk_queue_cluster(q)) {
			if (seg_size + bv.bv_len > queue_max_segment_size(q))
				goto new_segment;
			if (!BIOVEC_PHYS_MERGEABLE(&bvprv, &bv))
				goto new_segment;
			if (!BIOVEC_SEG_BOUNDARY(q, &bvprv, &bv))
				goto new_segment;

			seg_size += bv.bv_len;
			bvprv = bv;
			prev = 1;
			continue;
		}
new_segment:
		if (nsegs == queue_max_segments(q))
			goto split;

		nsegs++;
		bvprv = bv;
		prev = 1;
		seg_size = bv.bv_len;
	}

	return NULL;
split:
	split = bio_clone_bioset(bio, GFP_NOIO, bs);

	split->bi_iter.bi_size -= iter.bi_size;
	bio->bi_iter = iter;

	if (bio_integrity(bio)) {
		bio_integrity_advance(bio, split->bi_iter.bi_size);
		bio_integrity_trim(split, 0, bio_sectors(split));
	}

	return split;
}
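/*
 * Editor's sketch of the split above (not from the original source),
 * assuming queue_max_sectors(q) == 256 (128 KiB) and a 192 KiB bio made of
 * 4 KiB bio_vecs: the loop bails out on the bvec that pushes "sectors" past
 * 256, with "iter" still positioned on that bvec, so iter.bi_size is the
 * size of the tail (64 KiB).  The clone keeps the leading 128 KiB
 * (split->bi_iter.bi_size -= iter.bi_size) while the original bio is
 * advanced to cover only the remaining 64 KiB (bio->bi_iter = iter), which
 * blk_queue_split() then resubmits.
 */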

void blk_queue_split(struct request_queue *q, struct bio **bio,
		     struct bio_set *bs)
{
	struct bio *split;

	if ((*bio)->bi_rw & REQ_DISCARD)
		split = blk_bio_discard_split(q, *bio, bs);
	else if ((*bio)->bi_rw & REQ_WRITE_SAME)
		split = blk_bio_write_same_split(q, *bio, bs);
	else
		split = blk_bio_segment_split(q, *bio, q->bio_split);

	if (split) {
		bio_chain(split, *bio);
		generic_make_request(*bio);
		*bio = split;
	}
}
EXPORT_SYMBOL(blk_queue_split);
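/*
 * Editor's sketch (not part of the original file): a bio-based driver's
 * make_request function would typically call blk_queue_split() first and
 * then work on whatever bio it gets back; the names below are hypothetical.
 *
 *	static void mydev_make_request(struct request_queue *q, struct bio *bio)
 *	{
 *		blk_queue_split(q, &bio, q->bio_split);
 *
 *		// "bio" now honours the queue limits; any excess has already
 *		// been resubmitted via generic_make_request() and will show
 *		// up here again as a separate, smaller bio.
 *	}
 */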

static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
					     struct bio *bio,
					     bool no_sg_merge)
{
	struct bio_vec bv, bvprv = { NULL };
	int cluster, prev = 0;
	unsigned int seg_size, nr_phys_segs;
	struct bio *fbio, *bbio;
	struct bvec_iter iter;

	if (!bio)
		return 0;

	/*
	 * This should probably be returning 0, but blk_add_request_payload()
	 * (Christoph!!!!)
	 */
	if (bio->bi_rw & REQ_DISCARD)
		return 1;

	if (bio->bi_rw & REQ_WRITE_SAME)
		return 1;

	fbio = bio;
	cluster = blk_queue_cluster(q);
	seg_size = 0;
	nr_phys_segs = 0;
	for_each_bio(bio) {
		bio_for_each_segment(bv, bio, iter) {
			/*
			 * If SG merging is disabled, each bio vector is
			 * a segment
			 */
			if (no_sg_merge)
				goto new_segment;

			if (prev && cluster) {
				if (seg_size + bv.bv_len
				    > queue_max_segment_size(q))
					goto new_segment;
				if (!BIOVEC_PHYS_MERGEABLE(&bvprv, &bv))
					goto new_segment;
				if (!BIOVEC_SEG_BOUNDARY(q, &bvprv, &bv))
					goto new_segment;

				seg_size += bv.bv_len;
				bvprv = bv;
				continue;
			}
new_segment:
			if (nr_phys_segs == 1 && seg_size >
			    fbio->bi_seg_front_size)
				fbio->bi_seg_front_size = seg_size;

			nr_phys_segs++;
			bvprv = bv;
			prev = 1;
			seg_size = bv.bv_len;
		}
		bbio = bio;
	}

	if (nr_phys_segs == 1 && seg_size > fbio->bi_seg_front_size)
		fbio->bi_seg_front_size = seg_size;
	if (seg_size > bbio->bi_seg_back_size)
		bbio->bi_seg_back_size = seg_size;

	return nr_phys_segs;
}
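/*
 * Editor's note (not from the original source): bi_seg_front_size and
 * bi_seg_back_size record how large the first and last physical segments of
 * a bio ended up after merging.  blk_phys_contig_segment() below uses them
 * to decide whether the tail segment of one request and the head segment of
 * the next could collapse into a single scatterlist entry: with a 64 KiB
 * queue_max_segment_size(), for example, a 48 KiB back segment and a 32 KiB
 * front segment stay separate (48 KiB + 32 KiB > 64 KiB) even if the pages
 * happen to be physically contiguous.
 */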

void blk_recalc_rq_segments(struct request *rq)
{
	bool no_sg_merge = !!test_bit(QUEUE_FLAG_NO_SG_MERGE,
			&rq->q->queue_flags);

	rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio,
			no_sg_merge);
}

void blk_recount_segments(struct request_queue *q, struct bio *bio)
{
	unsigned short seg_cnt;

	/* estimate segment number by bi_vcnt for non-cloned bio */
	if (bio_flagged(bio, BIO_CLONED))
		seg_cnt = bio_segments(bio);
	else
		seg_cnt = bio->bi_vcnt;

	if (test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags) &&
			(seg_cnt < queue_max_segments(q)))
		bio->bi_phys_segments = seg_cnt;
	else {
		struct bio *nxt = bio->bi_next;

		bio->bi_next = NULL;
		bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio, false);
		bio->bi_next = nxt;
	}

	bio_set_flag(bio, BIO_SEG_VALID);
}
EXPORT_SYMBOL(blk_recount_segments);

static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
				   struct bio *nxt)
{
	struct bio_vec end_bv = { NULL }, nxt_bv;
	struct bvec_iter iter;

	if (!blk_queue_cluster(q))
		return 0;

	if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
	    queue_max_segment_size(q))
		return 0;

	if (!bio_has_data(bio))
		return 1;

	bio_for_each_segment(end_bv, bio, iter)
		if (end_bv.bv_len == iter.bi_size)
			break;

	nxt_bv = bio_iovec(nxt);

	if (!BIOVEC_PHYS_MERGEABLE(&end_bv, &nxt_bv))
		return 0;

	/*
	 * bio and nxt are contiguous in memory; check if the queue allows
	 * these two to be merged into one
	 */
	if (BIOVEC_SEG_BOUNDARY(q, &end_bv, &nxt_bv))
		return 1;

	return 0;
}

static inline void
__blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
		     struct scatterlist *sglist, struct bio_vec *bvprv,
		     struct scatterlist **sg, int *nsegs, int *cluster)
{

	int nbytes = bvec->bv_len;

	if (*sg && *cluster) {
		if ((*sg)->length + nbytes > queue_max_segment_size(q))
			goto new_segment;

		if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
			goto new_segment;
		if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
			goto new_segment;

		(*sg)->length += nbytes;
	} else {
new_segment:
		if (!*sg)
			*sg = sglist;
		else {
			/*
			 * If the driver previously mapped a shorter
			 * list, we could see a termination bit
			 * prematurely unless it fully inits the sg
			 * table on each mapping. We KNOW that there
			 * must be more entries here or the driver
			 * would be buggy, so force clear the
			 * termination bit to avoid doing a full
			 * sg_init_table() in drivers for each command.
			 */
			sg_unmark_end(*sg);
			*sg = sg_next(*sg);
		}

		sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset);
		(*nsegs)++;
	}
	*bvprv = *bvec;
}

static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
			     struct scatterlist *sglist,
			     struct scatterlist **sg)
{
	struct bio_vec bvec, bvprv = { NULL };
	struct bvec_iter iter;
	int nsegs, cluster;

	nsegs = 0;
	cluster = blk_queue_cluster(q);

	if (bio->bi_rw & REQ_DISCARD) {
		/*
		 * This is a hack - drivers should be neither modifying the
		 * biovec, nor relying on bi_vcnt - but because of
		 * blk_add_request_payload(), a discard bio may or may not have
		 * a payload we need to set up here (thank you Christoph) and
		 * bi_vcnt is really the only way of telling if we need to.
		 */

		if (bio->bi_vcnt)
			goto single_segment;

		return 0;
	}

	if (bio->bi_rw & REQ_WRITE_SAME) {
single_segment:
		*sg = sglist;
		bvec = bio_iovec(bio);
		sg_set_page(*sg, bvec.bv_page, bvec.bv_len, bvec.bv_offset);
		return 1;
	}

	for_each_bio(bio)
		bio_for_each_segment(bvec, bio, iter)
			__blk_segment_map_sg(q, &bvec, sglist, &bvprv, sg,
					     &nsegs, &cluster);

	return nsegs;
}

/*
 * map a request to scatterlist, return number of sg entries setup. Caller
 * must make sure sg can hold rq->nr_phys_segments entries
 */
int blk_rq_map_sg(struct request_queue *q, struct request *rq,
		  struct scatterlist *sglist)
{
	struct scatterlist *sg = NULL;
	int nsegs = 0;

	if (rq->bio)
		nsegs = __blk_bios_map_sg(q, rq->bio, sglist, &sg);

	if (unlikely(rq->cmd_flags & REQ_COPY_USER) &&
	    (blk_rq_bytes(rq) & q->dma_pad_mask)) {
		unsigned int pad_len =
			(q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;

		sg->length += pad_len;
		rq->extra_len += pad_len;
	}

	if (q->dma_drain_size && q->dma_drain_needed(rq)) {
		if (rq->cmd_flags & REQ_WRITE)
			memset(q->dma_drain_buffer, 0, q->dma_drain_size);

		sg->page_link &= ~0x02;
		sg = sg_next(sg);
		sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
			    q->dma_drain_size,
			    ((unsigned long)q->dma_drain_buffer) &
			    (PAGE_SIZE - 1));
		nsegs++;
		rq->extra_len += q->dma_drain_size;
	}

	if (sg)
		sg_mark_end(sg);

	return nsegs;
}
EXPORT_SYMBOL(blk_rq_map_sg);
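/*
 * Editor's sketch (not part of the original file) of typical driver usage,
 * with hypothetical variable names.  The caller sizes the table from
 * rq->nr_phys_segments, lets blk_rq_map_sg() fill it, and hands the result
 * to the DMA API:
 *
 *	struct scatterlist *sgl;
 *	int nents, mapped;
 *
 *	sgl = kmalloc_array(rq->nr_phys_segments, sizeof(*sgl), GFP_ATOMIC);
 *	if (!sgl)
 *		return -ENOMEM;
 *	sg_init_table(sgl, rq->nr_phys_segments);
 *
 *	nents  = blk_rq_map_sg(q, rq, sgl);
 *	mapped = dma_map_sg(dev, sgl, nents, rq_data_dir(rq) == WRITE ?
 *			    DMA_TO_DEVICE : DMA_FROM_DEVICE);
 *
 * Physically contiguous pieces may be coalesced, so hardware descriptors
 * should be built from the returned counts, not from rq->nr_phys_segments.
 */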

static inline int ll_new_hw_segment(struct request_queue *q,
				    struct request *req,
				    struct bio *bio)
{
	int nr_phys_segs = bio_phys_segments(q, bio);

	if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(q))
		goto no_merge;

	if (blk_integrity_merge_bio(q, req, bio) == false)
		goto no_merge;

	/*
	 * This will form the start of a new hw segment.  Bump both
	 * counters.
	 */
	req->nr_phys_segments += nr_phys_segs;
	return 1;

no_merge:
	req->cmd_flags |= REQ_NOMERGE;
	if (req == q->last_merge)
		q->last_merge = NULL;
	return 0;
}

int ll_back_merge_fn(struct request_queue *q, struct request *req,
		     struct bio *bio)
{
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req)) {
		req->cmd_flags |= REQ_NOMERGE;
		if (req == q->last_merge)
			q->last_merge = NULL;
		return 0;
	}
	if (!bio_flagged(req->biotail, BIO_SEG_VALID))
		blk_recount_segments(q, req->biotail);
	if (!bio_flagged(bio, BIO_SEG_VALID))
		blk_recount_segments(q, bio);

	return ll_new_hw_segment(q, req, bio);
}

int ll_front_merge_fn(struct request_queue *q, struct request *req,
		      struct bio *bio)
{
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req)) {
		req->cmd_flags |= REQ_NOMERGE;
		if (req == q->last_merge)
			q->last_merge = NULL;
		return 0;
	}
	if (!bio_flagged(bio, BIO_SEG_VALID))
		blk_recount_segments(q, bio);
	if (!bio_flagged(req->bio, BIO_SEG_VALID))
		blk_recount_segments(q, req->bio);

	return ll_new_hw_segment(q, req, bio);
}

/*
 * blk-mq uses req->special to carry normal driver per-request payload, it
 * does not indicate a prepared command that we cannot merge with.
 */
static bool req_no_special_merge(struct request *req)
{
	struct request_queue *q = req->q;

	return !q->mq_ops && req->special;
}

static int req_gap_to_prev(struct request *req, struct bio *next)
{
	struct bio *prev = req->biotail;

	return bvec_gap_to_prev(req->q, &prev->bi_io_vec[prev->bi_vcnt - 1],
			next->bi_io_vec[0].bv_offset);
}

static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
				struct request *next)
{
	int total_phys_segments;
	unsigned int seg_size =
		req->biotail->bi_seg_back_size + next->bio->bi_seg_front_size;

	/*
	 * First check if either of the requests is a re-queued request.
	 * Can't merge them if they are.
	 */
	if (req_no_special_merge(req) || req_no_special_merge(next))
		return 0;

	if (req_gap_to_prev(req, next->bio))
		return 0;

	/*
	 * Will it become too large?
	 */
	if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
	    blk_rq_get_max_sectors(req))
		return 0;

	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
	if (blk_phys_contig_segment(q, req->biotail, next->bio)) {
		if (req->nr_phys_segments == 1)
			req->bio->bi_seg_front_size = seg_size;
		if (next->nr_phys_segments == 1)
			next->biotail->bi_seg_back_size = seg_size;
		total_phys_segments--;
	}

	if (total_phys_segments > queue_max_segments(q))
		return 0;

	if (blk_integrity_merge_rq(q, req, next) == false)
		return 0;

	/* Merge is OK... */
	req->nr_phys_segments = total_phys_segments;
	return 1;
}

/**
 * blk_rq_set_mixed_merge - mark a request as mixed merge
 * @rq: request to mark as mixed merge
 *
 * Description:
 *     @rq is about to be mixed merged.  Make sure the attributes
 *     which can be mixed are set in each bio and mark @rq as mixed
 *     merged.
 */
void blk_rq_set_mixed_merge(struct request *rq)
{
	unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
	struct bio *bio;

	if (rq->cmd_flags & REQ_MIXED_MERGE)
		return;

	/*
	 * @rq will no longer represent mixable attributes for all the
	 * contained bios.  It will just track those of the first one.
	 * Distribute the attributes to each bio.
	 */
	for (bio = rq->bio; bio; bio = bio->bi_next) {
		WARN_ON_ONCE((bio->bi_rw & REQ_FAILFAST_MASK) &&
			     (bio->bi_rw & REQ_FAILFAST_MASK) != ff);
		bio->bi_rw |= ff;
	}
	rq->cmd_flags |= REQ_MIXED_MERGE;
}

static void blk_account_io_merge(struct request *req)
{
	if (blk_do_io_stat(req)) {
		struct hd_struct *part;
		int cpu;

		cpu = part_stat_lock();
		part = req->part;

		part_round_stats(cpu, part);
		part_dec_in_flight(part, rq_data_dir(req));

		hd_struct_put(part);
		part_stat_unlock();
	}
}

/*
 * Has to be called with the request spinlock acquired
 */
static int attempt_merge(struct request_queue *q, struct request *req,
			  struct request *next)
{
	if (!rq_mergeable(req) || !rq_mergeable(next))
		return 0;

	if (!blk_check_merge_flags(req->cmd_flags, next->cmd_flags))
		return 0;

	/*
	 * not contiguous
	 */
	if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next))
		return 0;

	if (rq_data_dir(req) != rq_data_dir(next)
	    || req->rq_disk != next->rq_disk
	    || req_no_special_merge(next))
		return 0;

	if (req->cmd_flags & REQ_WRITE_SAME &&
	    !blk_write_same_mergeable(req->bio, next->bio))
		return 0;

	/*
	 * If we are allowed to merge, then append bio list
	 * from next to rq and release next. merge_requests_fn
	 * will have updated segment counts, update sector
	 * counts here.
	 */
	if (!ll_merge_requests_fn(q, req, next))
		return 0;

	/*
	 * If failfast settings disagree or any of the two is already
	 * a mixed merge, mark both as mixed before proceeding.  This
	 * makes sure that all involved bios have mixable attributes
	 * set properly.
	 */
	if ((req->cmd_flags | next->cmd_flags) & REQ_MIXED_MERGE ||
	    (req->cmd_flags & REQ_FAILFAST_MASK) !=
	    (next->cmd_flags & REQ_FAILFAST_MASK)) {
		blk_rq_set_mixed_merge(req);
		blk_rq_set_mixed_merge(next);
	}

	/*
	 * At this point we have either done a back merge
	 * or front merge. We need the smaller start_time of
	 * the merged requests to be the current request
	 * for accounting purposes.
	 */
	if (time_after(req->start_time, next->start_time))
		req->start_time = next->start_time;

	req->biotail->bi_next = next->bio;
	req->biotail = next->biotail;

	req->__data_len += blk_rq_bytes(next);

	elv_merge_requests(q, req, next);

	/*
	 * 'next' is going away, so update stats accordingly
	 */
	blk_account_io_merge(next);

	req->ioprio = ioprio_best(req->ioprio, next->ioprio);
	if (blk_rq_cpu_valid(next))
		req->cpu = next->cpu;

	/* ownership of bio passed from next to req */
	next->bio = NULL;
	__blk_put_request(q, next);
	return 1;
}

int attempt_back_merge(struct request_queue *q, struct request *rq)
{
	struct request *next = elv_latter_request(q, rq);

	if (next)
		return attempt_merge(q, rq, next);

	return 0;
}

int attempt_front_merge(struct request_queue *q, struct request *rq)
{
	struct request *prev = elv_former_request(q, rq);

	if (prev)
		return attempt_merge(q, prev, rq);

	return 0;
}

int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
			  struct request *next)
{
	return attempt_merge(q, rq, next);
}

bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
{
	if (!rq_mergeable(rq) || !bio_mergeable(bio))
		return false;

	if (!blk_check_merge_flags(rq->cmd_flags, bio->bi_rw))
		return false;

	/* different data direction or already started, don't merge */
	if (bio_data_dir(bio) != rq_data_dir(rq))
		return false;

	/* must be same device and not a special request */
	if (rq->rq_disk != bio->bi_bdev->bd_disk || req_no_special_merge(rq))
		return false;

	/* only merge integrity protected bio into ditto rq */
	if (blk_integrity_merge_bio(rq->q, rq, bio) == false)
		return false;

	/* must be using the same buffer */
	if (rq->cmd_flags & REQ_WRITE_SAME &&
	    !blk_write_same_mergeable(rq->bio, bio))
		return false;

	/* Only check gaps if the bio carries data */
	if (bio_has_data(bio) && req_gap_to_prev(rq, bio))
		return false;

	return true;
}

int blk_try_merge(struct request *rq, struct bio *bio)
{
	if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
		return ELEVATOR_BACK_MERGE;
	else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
		return ELEVATOR_FRONT_MERGE;
	return ELEVATOR_NO_MERGE;
}
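/*
 * Editor's example (not from the original source): for a request covering
 * sectors 2048..2055 (blk_rq_pos() == 2048, blk_rq_sectors() == 8) and an
 * 8-sector bio:
 *
 *   bio->bi_iter.bi_sector == 2056  ->  2048 + 8 == 2056, ELEVATOR_BACK_MERGE
 *   bio->bi_iter.bi_sector == 2040  ->  2048 - 8 == 2040, ELEVATOR_FRONT_MERGE
 *   any other starting sector       ->  ELEVATOR_NO_MERGE
 */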