/*
 * Functions related to segment and merge handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

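/*
 * Split a discard bio so that the front piece fits within the queue's
 * max_discard_sectors limit and ends on a discard_granularity boundary.
 * Returns the split-off front piece, or NULL if no split is needed.
 */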
static struct bio *blk_bio_discard_split(struct request_queue *q,
					 struct bio *bio,
					 struct bio_set *bs)
{
	unsigned int max_discard_sectors, granularity;
	int alignment;
	sector_t tmp;
	unsigned split_sectors;

	/* Zero-sector (unknown) and one-sector granularities are the same.  */
	granularity = max(q->limits.discard_granularity >> 9, 1U);

	max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
	max_discard_sectors -= max_discard_sectors % granularity;

	if (unlikely(!max_discard_sectors)) {
		/* XXX: warn */
		return NULL;
	}

	if (bio_sectors(bio) <= max_discard_sectors)
		return NULL;

	split_sectors = max_discard_sectors;

	/*
	 * If the next starting sector would be misaligned, stop the discard at
	 * the previous aligned sector.
	 */
	alignment = (q->limits.discard_alignment >> 9) % granularity;

	tmp = bio->bi_iter.bi_sector + split_sectors - alignment;
	tmp = sector_div(tmp, granularity);

	if (split_sectors > tmp)
		split_sectors -= tmp;

	return bio_split(bio, split_sectors, GFP_NOIO, bs);
}

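/*
 * Split a WRITE SAME bio that is larger than the queue's
 * max_write_same_sectors limit.  Returns NULL if no split is needed
 * (or if the queue does not support WRITE SAME at all).
 */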
static struct bio *blk_bio_write_same_split(struct request_queue *q,
					    struct bio *bio,
					    struct bio_set *bs)
{
	if (!q->limits.max_write_same_sectors)
		return NULL;

	if (bio_sectors(bio) <= q->limits.max_write_same_sectors)
		return NULL;

	return bio_split(bio, q->limits.max_write_same_sectors, GFP_NOIO, bs);
}

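/*
 * Walk the bio segment by segment and split it once it would exceed the
 * queue's max_sectors, max_segments, max_segment_size or SG-gap limits.
 * The already-iterated front portion is cloned and returned; the original
 * bio is advanced so that it covers only the remainder.
 */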
static struct bio *blk_bio_segment_split(struct request_queue *q,
					 struct bio *bio,
					 struct bio_set *bs)
{
	struct bio *split;
	struct bio_vec bv, bvprv;
	struct bvec_iter iter;
	unsigned seg_size = 0, nsegs = 0, sectors = 0;
	int prev = 0;

	bio_for_each_segment(bv, bio, iter) {
		sectors += bv.bv_len >> 9;

		if (sectors > queue_max_sectors(q))
			goto split;

		/*
		 * If the queue doesn't support SG gaps and adding this
		 * offset would create a gap, disallow it.
		 */
		if (q->queue_flags & (1 << QUEUE_FLAG_SG_GAPS) &&
		    prev && bvec_gap_to_prev(&bvprv, bv.bv_offset))
			goto split;

		if (prev && blk_queue_cluster(q)) {
			if (seg_size + bv.bv_len > queue_max_segment_size(q))
				goto new_segment;
			if (!BIOVEC_PHYS_MERGEABLE(&bvprv, &bv))
				goto new_segment;
			if (!BIOVEC_SEG_BOUNDARY(q, &bvprv, &bv))
				goto new_segment;

			seg_size += bv.bv_len;
			bvprv = bv;
			prev = 1;
			continue;
		}
new_segment:
		if (nsegs == queue_max_segments(q))
			goto split;

		nsegs++;
		bvprv = bv;
		prev = 1;
		seg_size = bv.bv_len;
	}

	return NULL;
split:
	split = bio_clone_bioset(bio, GFP_NOIO, bs);

	split->bi_iter.bi_size -= iter.bi_size;
	bio->bi_iter = iter;

	if (bio_integrity(bio)) {
		bio_integrity_advance(bio, split->bi_iter.bi_size);
		bio_integrity_trim(split, 0, bio_sectors(split));
	}

	return split;
}

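/*
 * Split *bio if it exceeds the queue limits for its operation type.  The
 * front piece is chained to the remainder, the remainder is resubmitted
 * with generic_make_request(), and *bio is updated to point at the front
 * piece so the caller can carry on with a bio that fits.
 */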
void blk_queue_split(struct request_queue *q, struct bio **bio,
		     struct bio_set *bs)
{
	struct bio *split;

	if ((*bio)->bi_rw & REQ_DISCARD)
		split = blk_bio_discard_split(q, *bio, bs);
	else if ((*bio)->bi_rw & REQ_WRITE_SAME)
		split = blk_bio_write_same_split(q, *bio, bs);
	else
		split = blk_bio_segment_split(q, *bio, q->bio_split);

	if (split) {
		bio_chain(split, *bio);
		generic_make_request(*bio);
		*bio = split;
	}
}
EXPORT_SYMBOL(blk_queue_split);

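/*
 * Count the physical segments in a bio chain, honouring the queue's
 * clustering, segment size and segment boundary limits.  The front and
 * back segment sizes are recorded in the first and last bios so that
 * later request merges can be checked cheaply.
 */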
static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
					     struct bio *bio,
					     bool no_sg_merge)
{
	struct bio_vec bv, bvprv = { NULL };
	int cluster, prev = 0;
	unsigned int seg_size, nr_phys_segs;
	struct bio *fbio, *bbio;
	struct bvec_iter iter;

	if (!bio)
		return 0;

	/*
	 * This should probably be returning 0, but blk_add_request_payload()
	 * (Christoph!!!!)
	 */
	if (bio->bi_rw & REQ_DISCARD)
		return 1;

	if (bio->bi_rw & REQ_WRITE_SAME)
		return 1;

	fbio = bio;
	cluster = blk_queue_cluster(q);
	seg_size = 0;
	nr_phys_segs = 0;
	for_each_bio(bio) {
		bio_for_each_segment(bv, bio, iter) {
			/*
			 * If SG merging is disabled, each bio vector is
			 * a segment
			 */
			if (no_sg_merge)
				goto new_segment;

			if (prev && cluster) {
				if (seg_size + bv.bv_len
				    > queue_max_segment_size(q))
					goto new_segment;
				if (!BIOVEC_PHYS_MERGEABLE(&bvprv, &bv))
					goto new_segment;
				if (!BIOVEC_SEG_BOUNDARY(q, &bvprv, &bv))
					goto new_segment;

				seg_size += bv.bv_len;
				bvprv = bv;
				continue;
			}
new_segment:
			if (nr_phys_segs == 1 && seg_size >
			    fbio->bi_seg_front_size)
				fbio->bi_seg_front_size = seg_size;

			nr_phys_segs++;
			bvprv = bv;
			prev = 1;
			seg_size = bv.bv_len;
		}
		bbio = bio;
	}

	if (nr_phys_segs == 1 && seg_size > fbio->bi_seg_front_size)
		fbio->bi_seg_front_size = seg_size;
	if (seg_size > bbio->bi_seg_back_size)
		bbio->bi_seg_back_size = seg_size;

	return nr_phys_segs;
}

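/*
 * Recompute rq->nr_phys_segments after the request's bio list has changed.
 */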
void blk_recalc_rq_segments(struct request *rq)
{
	bool no_sg_merge = !!test_bit(QUEUE_FLAG_NO_SG_MERGE,
			&rq->q->queue_flags);

	rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio,
			no_sg_merge);
}

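/*
 * Recount the physical segments of a single bio and mark it BIO_SEG_VALID.
 * When SG merging is disabled, bi_vcnt (or bio_segments() for cloned bios)
 * is used as a cheap estimate instead of a full recalculation.
 */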
void blk_recount_segments(struct request_queue *q, struct bio *bio)
{
	unsigned short seg_cnt;

	/* estimate segment number by bi_vcnt for non-cloned bio */
	if (bio_flagged(bio, BIO_CLONED))
		seg_cnt = bio_segments(bio);
	else
		seg_cnt = bio->bi_vcnt;

	if (test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags) &&
			(seg_cnt < queue_max_segments(q)))
		bio->bi_phys_segments = seg_cnt;
	else {
		struct bio *nxt = bio->bi_next;

		bio->bi_next = NULL;
		bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio, false);
		bio->bi_next = nxt;
	}

	bio_set_flag(bio, BIO_SEG_VALID);
}
EXPORT_SYMBOL(blk_recount_segments);

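/*
 * Can the last segment of @bio and the first segment of @nxt be merged
 * into a single physical segment without breaking the queue's segment
 * size and boundary limits?
 */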
static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
				   struct bio *nxt)
{
	struct bio_vec end_bv = { NULL }, nxt_bv;
	struct bvec_iter iter;

	if (!blk_queue_cluster(q))
		return 0;

	if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
	    queue_max_segment_size(q))
		return 0;

	if (!bio_has_data(bio))
		return 1;

	bio_for_each_segment(end_bv, bio, iter)
		if (end_bv.bv_len == iter.bi_size)
			break;

	nxt_bv = bio_iovec(nxt);

	if (!BIOVEC_PHYS_MERGEABLE(&end_bv, &nxt_bv))
		return 0;

	/*
	 * bio and nxt are contiguous in memory; check if the queue allows
	 * these two to be merged into one
	 */
	if (BIOVEC_SEG_BOUNDARY(q, &end_bv, &nxt_bv))
		return 1;

	return 0;
}

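/*
 * Add a single bio_vec to the scatterlist: either extend the current sg
 * entry when the queue's clustering rules allow it, or start a new entry.
 */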
static inline void
__blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
		     struct scatterlist *sglist, struct bio_vec *bvprv,
		     struct scatterlist **sg, int *nsegs, int *cluster)
{

	int nbytes = bvec->bv_len;

	if (*sg && *cluster) {
		if ((*sg)->length + nbytes > queue_max_segment_size(q))
			goto new_segment;

		if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
			goto new_segment;
		if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
			goto new_segment;

		(*sg)->length += nbytes;
	} else {
new_segment:
		if (!*sg)
			*sg = sglist;
		else {
			/*
			 * If the driver previously mapped a shorter
			 * list, we could see a termination bit
			 * prematurely unless it fully inits the sg
			 * table on each mapping. We KNOW that there
			 * must be more entries here or the driver
			 * would be buggy, so force clear the
			 * termination bit to avoid doing a full
			 * sg_init_table() in drivers for each command.
			 */
			sg_unmark_end(*sg);
			*sg = sg_next(*sg);
		}

		sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset);
		(*nsegs)++;
	}
	*bvprv = *bvec;
}

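/*
 * Map every bio_vec of a bio chain into @sglist.  DISCARD and WRITE SAME
 * bios carry at most a single payload segment and are special-cased.
 */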
static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
			     struct scatterlist *sglist,
			     struct scatterlist **sg)
{
	struct bio_vec bvec, bvprv = { NULL };
	struct bvec_iter iter;
	int nsegs, cluster;

	nsegs = 0;
	cluster = blk_queue_cluster(q);

	if (bio->bi_rw & REQ_DISCARD) {
		/*
		 * This is a hack - drivers should be neither modifying the
		 * biovec, nor relying on bi_vcnt - but because of
		 * blk_add_request_payload(), a discard bio may or may not have
		 * a payload we need to set up here (thank you Christoph) and
		 * bi_vcnt is really the only way of telling if we need to.
		 */

		if (bio->bi_vcnt)
			goto single_segment;

		return 0;
	}

	if (bio->bi_rw & REQ_WRITE_SAME) {
single_segment:
		*sg = sglist;
		bvec = bio_iovec(bio);
		sg_set_page(*sg, bvec.bv_page, bvec.bv_len, bvec.bv_offset);
		return 1;
	}

	for_each_bio(bio)
		bio_for_each_segment(bvec, bio, iter)
			__blk_segment_map_sg(q, &bvec, sglist, &bvprv, sg,
					     &nsegs, &cluster);

	return nsegs;
}

/*
 * map a request to scatterlist, return number of sg entries setup. Caller
 * must make sure sg can hold rq->nr_phys_segments entries
 */
int blk_rq_map_sg(struct request_queue *q, struct request *rq,
		  struct scatterlist *sglist)
{
	struct scatterlist *sg = NULL;
	int nsegs = 0;

	if (rq->bio)
		nsegs = __blk_bios_map_sg(q, rq->bio, sglist, &sg);

	if (unlikely(rq->cmd_flags & REQ_COPY_USER) &&
	    (blk_rq_bytes(rq) & q->dma_pad_mask)) {
		unsigned int pad_len =
			(q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;

		sg->length += pad_len;
		rq->extra_len += pad_len;
	}

	if (q->dma_drain_size && q->dma_drain_needed(rq)) {
		if (rq->cmd_flags & REQ_WRITE)
			memset(q->dma_drain_buffer, 0, q->dma_drain_size);

		sg->page_link &= ~0x02;
		sg = sg_next(sg);
		sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
			    q->dma_drain_size,
			    ((unsigned long)q->dma_drain_buffer) &
			    (PAGE_SIZE - 1));
		nsegs++;
		rq->extra_len += q->dma_drain_size;
	}

	if (sg)
		sg_mark_end(sg);

	return nsegs;
}
EXPORT_SYMBOL(blk_rq_map_sg);

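/*
 * Account the physical segments that @bio would add to @req.  Fails and
 * marks the request un-mergeable if the queue's segment limit would be
 * exceeded or the integrity payloads cannot be merged.
 */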
static inline int ll_new_hw_segment(struct request_queue *q,
				    struct request *req,
				    struct bio *bio)
{
	int nr_phys_segs = bio_phys_segments(q, bio);

	if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(q))
		goto no_merge;

	if (blk_integrity_merge_bio(q, req, bio) == false)
		goto no_merge;

	/*
	 * This will form the start of a new hw segment.  Bump both
	 * counters.
	 */
	req->nr_phys_segments += nr_phys_segs;
	return 1;

no_merge:
	req->cmd_flags |= REQ_NOMERGE;
	if (req == q->last_merge)
		q->last_merge = NULL;
	return 0;
}

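/*
 * Can @bio be merged to the back (tail) of @req?
 */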
int ll_back_merge_fn(struct request_queue *q, struct request *req,
		     struct bio *bio)
{
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req)) {
		req->cmd_flags |= REQ_NOMERGE;
		if (req == q->last_merge)
			q->last_merge = NULL;
		return 0;
	}
	if (!bio_flagged(req->biotail, BIO_SEG_VALID))
		blk_recount_segments(q, req->biotail);
	if (!bio_flagged(bio, BIO_SEG_VALID))
		blk_recount_segments(q, bio);

	return ll_new_hw_segment(q, req, bio);
}

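/*
 * Can @bio be merged to the front (head) of @req?
 */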
int ll_front_merge_fn(struct request_queue *q, struct request *req,
		      struct bio *bio)
{
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req)) {
		req->cmd_flags |= REQ_NOMERGE;
		if (req == q->last_merge)
			q->last_merge = NULL;
		return 0;
	}
	if (!bio_flagged(bio, BIO_SEG_VALID))
		blk_recount_segments(q, bio);
	if (!bio_flagged(req->bio, BIO_SEG_VALID))
		blk_recount_segments(q, req->bio);

	return ll_new_hw_segment(q, req, bio);
}

/*
 * blk-mq uses req->special to carry normal driver per-request payload, it
 * does not indicate a prepared command that we cannot merge with.
 */
static bool req_no_special_merge(struct request *req)
{
	struct request_queue *q = req->q;

	return !q->mq_ops && req->special;
}

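/*
 * Is there an SG gap between the last bio_vec of @req and the first
 * bio_vec of @next?
 */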
static int req_gap_to_prev(struct request *req, struct request *next)
{
	struct bio *prev = req->biotail;

	return bvec_gap_to_prev(&prev->bi_io_vec[prev->bi_vcnt - 1],
				next->bio->bi_io_vec[0].bv_offset);
}

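/*
 * Check whether @req and @next can be combined into a single request
 * without violating the queue's size, segment and integrity limits, and
 * update the physical segment count of @req if they can.
 */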
static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
				struct request *next)
{
	int total_phys_segments;
	unsigned int seg_size =
		req->biotail->bi_seg_back_size + next->bio->bi_seg_front_size;

	/*
	 * First check if either of the requests are re-queued
	 * requests.  Can't merge them if they are.
	 */
	if (req_no_special_merge(req) || req_no_special_merge(next))
		return 0;

	if (test_bit(QUEUE_FLAG_SG_GAPS, &q->queue_flags) &&
	    req_gap_to_prev(req, next))
		return 0;

	/*
	 * Will it become too large?
	 */
	if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
	    blk_rq_get_max_sectors(req))
		return 0;

	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
	if (blk_phys_contig_segment(q, req->biotail, next->bio)) {
		if (req->nr_phys_segments == 1)
			req->bio->bi_seg_front_size = seg_size;
		if (next->nr_phys_segments == 1)
			next->biotail->bi_seg_back_size = seg_size;
		total_phys_segments--;
	}

	if (total_phys_segments > queue_max_segments(q))
		return 0;

	if (blk_integrity_merge_rq(q, req, next) == false)
		return 0;

	/* Merge is OK... */
	req->nr_phys_segments = total_phys_segments;
	return 1;
}

/**
 * blk_rq_set_mixed_merge - mark a request as mixed merge
 * @rq: request to mark as mixed merge
 *
 * Description:
 *     @rq is about to be mixed merged.  Make sure the attributes
 *     which can be mixed are set in each bio and mark @rq as mixed
 *     merged.
 */
void blk_rq_set_mixed_merge(struct request *rq)
{
	unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
	struct bio *bio;

	if (rq->cmd_flags & REQ_MIXED_MERGE)
		return;

	/*
	 * @rq will no longer represent mixable attributes for all the
	 * contained bios.  It will just track those of the first one.
	 * Distributes the attributes to each bio.
	 */
	for (bio = rq->bio; bio; bio = bio->bi_next) {
		WARN_ON_ONCE((bio->bi_rw & REQ_FAILFAST_MASK) &&
			     (bio->bi_rw & REQ_FAILFAST_MASK) != ff);
		bio->bi_rw |= ff;
	}
	rq->cmd_flags |= REQ_MIXED_MERGE;
}

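/*
 * Update the per-partition in-flight accounting for a request that is
 * about to disappear because it was merged into another one.
 */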
static void blk_account_io_merge(struct request *req)
{
	if (blk_do_io_stat(req)) {
		struct hd_struct *part;
		int cpu;

		cpu = part_stat_lock();
		part = req->part;

		part_round_stats(cpu, part);
		part_dec_in_flight(part, rq_data_dir(req));

		hd_struct_put(part);
		part_stat_unlock();
	}
}

/*
 * Has to be called with the request spinlock acquired
 */
static int attempt_merge(struct request_queue *q, struct request *req,
			  struct request *next)
{
	if (!rq_mergeable(req) || !rq_mergeable(next))
		return 0;


	if (!blk_check_merge_flags(req->cmd_flags, next->cmd_flags))
		return 0;

	/*
	 * not contiguous
	 */
	if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next))
		return 0;

	if (rq_data_dir(req) != rq_data_dir(next)
	    || req->rq_disk != next->rq_disk
	    || req_no_special_merge(next))
		return 0;

	if (req->cmd_flags & REQ_WRITE_SAME &&
	    !blk_write_same_mergeable(req->bio, next->bio))
		return 0;

	/*
	 * If we are allowed to merge, then append bio list
	 * from next to rq and release next. merge_requests_fn
	 * will have updated segment counts, update sector
	 * counts here.
	 */
	if (!ll_merge_requests_fn(q, req, next))
		return 0;

	/*
	 * If failfast settings disagree or any of the two is already
	 * a mixed merge, mark both as mixed before proceeding.  This
	 * makes sure that all involved bios have mixable attributes
	 * set properly.
	 */
	if ((req->cmd_flags | next->cmd_flags) & REQ_MIXED_MERGE ||
	    (req->cmd_flags & REQ_FAILFAST_MASK) !=
	    (next->cmd_flags & REQ_FAILFAST_MASK)) {
		blk_rq_set_mixed_merge(req);
		blk_rq_set_mixed_merge(next);
	}

	/*
	 * At this point we have either done a back merge
	 * or front merge. We need the smaller start_time of
	 * the merged requests to be the current request
	 * for accounting purposes.
	 */
	if (time_after(req->start_time, next->start_time))
		req->start_time = next->start_time;

	req->biotail->bi_next = next->bio;
	req->biotail = next->biotail;

	req->__data_len += blk_rq_bytes(next);

	elv_merge_requests(q, req, next);

	/*
	 * 'next' is going away, so update stats accordingly
	 */
	blk_account_io_merge(next);

	req->ioprio = ioprio_best(req->ioprio, next->ioprio);
	if (blk_rq_cpu_valid(next))
		req->cpu = next->cpu;

	/* ownership of bio passed from next to req */
	next->bio = NULL;
	__blk_put_request(q, next);
	return 1;
}

int attempt_back_merge(struct request_queue *q, struct request *rq)
{
	struct request *next = elv_latter_request(q, rq);

	if (next)
		return attempt_merge(q, rq, next);

	return 0;
}

int attempt_front_merge(struct request_queue *q, struct request *rq)
{
	struct request *prev = elv_former_request(q, rq);

	if (prev)
		return attempt_merge(q, prev, rq);

	return 0;
}

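/*
 * Try to merge @next into @rq; wrapper around attempt_merge() for callers
 * outside this file.
 */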
int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
			  struct request *next)
{
	return attempt_merge(q, rq, next);
}

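/*
 * Check all the conditions that must hold before @bio may be merged into
 * @rq at all: flags, data direction, device, integrity, WRITE SAME buffer
 * and SG gaps.
 */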
bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
{
	struct request_queue *q = rq->q;

	if (!rq_mergeable(rq) || !bio_mergeable(bio))
		return false;

	if (!blk_check_merge_flags(rq->cmd_flags, bio->bi_rw))
		return false;

	/* different data direction or already started, don't merge */
	if (bio_data_dir(bio) != rq_data_dir(rq))
		return false;

	/* must be same device and not a special request */
	if (rq->rq_disk != bio->bi_bdev->bd_disk || req_no_special_merge(rq))
		return false;

	/* only merge integrity protected bio into ditto rq */
	if (blk_integrity_merge_bio(rq->q, rq, bio) == false)
		return false;

	/* must be using the same buffer */
	if (rq->cmd_flags & REQ_WRITE_SAME &&
	    !blk_write_same_mergeable(rq->bio, bio))
		return false;

	/* Only check gaps if the bio carries data */
	if (q->queue_flags & (1 << QUEUE_FLAG_SG_GAPS) && bio_has_data(bio)) {
		struct bio_vec *bprev;

		bprev = &rq->biotail->bi_io_vec[rq->biotail->bi_vcnt - 1];
		if (bvec_gap_to_prev(bprev, bio->bi_io_vec[0].bv_offset))
			return false;
	}

	return true;
}

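/*
 * Based purely on the sector positions, decide whether @bio would be a
 * back merge, a front merge or no merge for @rq.
 */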
int blk_try_merge(struct request *rq, struct bio *bio)
{
	if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
		return ELEVATOR_BACK_MERGE;
	else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
		return ELEVATOR_FRONT_MERGE;
	return ELEVATOR_NO_MERGE;
}