blk-merge.c
/*
 * Functions related to segment and merge handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

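/*
 * Split a discard bio that exceeds the queue's discard limits.  The split
 * point is rounded down so that the remainder starts on a discard_granularity
 * boundary.  Returns the split-off front piece, or NULL if no split is
 * needed (or if the queue reports no usable max_discard_sectors).
 */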
static struct bio *blk_bio_discard_split(struct request_queue *q,
					 struct bio *bio,
					 struct bio_set *bs)
{
	unsigned int max_discard_sectors, granularity;
	int alignment;
	sector_t tmp;
	unsigned split_sectors;

	/* Zero-sector (unknown) and one-sector granularities are the same.  */
	granularity = max(q->limits.discard_granularity >> 9, 1U);

	max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
	max_discard_sectors -= max_discard_sectors % granularity;

	if (unlikely(!max_discard_sectors)) {
		/* XXX: warn */
		return NULL;
	}

	if (bio_sectors(bio) <= max_discard_sectors)
		return NULL;

	split_sectors = max_discard_sectors;

	/*
	 * If the next starting sector would be misaligned, stop the discard at
	 * the previous aligned sector.
	 */
	alignment = (q->limits.discard_alignment >> 9) % granularity;

	tmp = bio->bi_iter.bi_sector + split_sectors - alignment;
	tmp = sector_div(tmp, granularity);

	if (split_sectors > tmp)
		split_sectors -= tmp;

	return bio_split(bio, split_sectors, GFP_NOIO, bs);
}

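/*
 * Split a WRITE SAME bio that exceeds the queue's max_write_same_sectors
 * limit.  Returns the split-off front piece, or NULL if the bio already
 * fits (or the limit is zero).
 */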
static struct bio *blk_bio_write_same_split(struct request_queue *q,
					    struct bio *bio,
					    struct bio_set *bs)
{
	if (!q->limits.max_write_same_sectors)
		return NULL;

	if (bio_sectors(bio) <= q->limits.max_write_same_sectors)
		return NULL;

	return bio_split(bio, q->limits.max_write_same_sectors, GFP_NOIO, bs);
}

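/*
 * Split a regular read/write bio in front of the first bvec that would
 * exceed the queue's segment count, segment size, max_sectors or SG gap
 * limits, or that the queue's merge_bvec_fn rejects.  Returns the front
 * piece, or NULL if the whole bio already fits.
 */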
static struct bio *blk_bio_segment_split(struct request_queue *q,
					 struct bio *bio,
					 struct bio_set *bs)
{
	struct bio *split;
	struct bio_vec bv, bvprv;
	struct bvec_iter iter;
	unsigned seg_size = 0, nsegs = 0;
	int prev = 0;

	struct bvec_merge_data bvm = {
		.bi_bdev	= bio->bi_bdev,
		.bi_sector	= bio->bi_iter.bi_sector,
		.bi_size	= 0,
		.bi_rw		= bio->bi_rw,
	};

	bio_for_each_segment(bv, bio, iter) {
		if (q->merge_bvec_fn &&
		    q->merge_bvec_fn(q, &bvm, &bv) < (int) bv.bv_len)
			goto split;

		bvm.bi_size += bv.bv_len;

		if (bvm.bi_size >> 9 > queue_max_sectors(q))
			goto split;

		/*
		 * If the queue doesn't support SG gaps and adding this
		 * offset would create a gap, disallow it.
		 */
		if (q->queue_flags & (1 << QUEUE_FLAG_SG_GAPS) &&
		    prev && bvec_gap_to_prev(&bvprv, bv.bv_offset))
			goto split;

		if (prev && blk_queue_cluster(q)) {
			if (seg_size + bv.bv_len > queue_max_segment_size(q))
				goto new_segment;
			if (!BIOVEC_PHYS_MERGEABLE(&bvprv, &bv))
				goto new_segment;
			if (!BIOVEC_SEG_BOUNDARY(q, &bvprv, &bv))
				goto new_segment;

			seg_size += bv.bv_len;
			bvprv = bv;
			prev = 1;
			continue;
		}
new_segment:
		if (nsegs == queue_max_segments(q))
			goto split;

		nsegs++;
		bvprv = bv;
		prev = 1;
		seg_size = bv.bv_len;
	}

	return NULL;
split:
	split = bio_clone_bioset(bio, GFP_NOIO, bs);

	split->bi_iter.bi_size -= iter.bi_size;
	bio->bi_iter = iter;

	if (bio_integrity(bio)) {
		bio_integrity_advance(bio, split->bi_iter.bi_size);
		bio_integrity_trim(split, 0, bio_sectors(split));
	}

	return split;
}

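/*
 * Split *bio as needed so that the piece returned in *bio obeys the queue
 * limits; any remainder is chained and resubmitted via
 * generic_make_request().
 *
 * Illustrative caller sketch (not taken from this file; "my_make_request"
 * is a hypothetical bio-based driver entry point):
 *
 *	static void my_make_request(struct request_queue *q, struct bio *bio)
 *	{
 *		blk_queue_split(q, &bio, q->bio_split);
 *		... process bio, which now fits the queue limits ...
 *	}
 */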
void blk_queue_split(struct request_queue *q, struct bio **bio,
		     struct bio_set *bs)
{
	struct bio *split;

	if ((*bio)->bi_rw & REQ_DISCARD)
		split = blk_bio_discard_split(q, *bio, bs);
	else if ((*bio)->bi_rw & REQ_WRITE_SAME)
		split = blk_bio_write_same_split(q, *bio, bs);
	else
		split = blk_bio_segment_split(q, *bio, q->bio_split);

	if (split) {
		bio_chain(split, *bio);
		generic_make_request(*bio);
		*bio = split;
	}
}
EXPORT_SYMBOL(blk_queue_split);

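/*
 * Count the physical segments in a bio chain, honouring the queue's
 * clustering, segment size and segment boundary limits, and record the
 * front/back segment sizes used later when merging requests.  With
 * no_sg_merge set, every bvec counts as its own segment.
 */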
static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
					     struct bio *bio,
					     bool no_sg_merge)
{
	struct bio_vec bv, bvprv = { NULL };
	int cluster, prev = 0;
	unsigned int seg_size, nr_phys_segs;
	struct bio *fbio, *bbio;
	struct bvec_iter iter;

	if (!bio)
		return 0;

	/*
	 * This should probably be returning 0, but blk_add_request_payload()
	 * (Christoph!!!!)
	 */
	if (bio->bi_rw & REQ_DISCARD)
		return 1;

	if (bio->bi_rw & REQ_WRITE_SAME)
		return 1;

	fbio = bio;
	cluster = blk_queue_cluster(q);
	seg_size = 0;
	nr_phys_segs = 0;
	for_each_bio(bio) {
		bio_for_each_segment(bv, bio, iter) {
			/*
			 * If SG merging is disabled, each bio vector is
			 * a segment
			 */
			if (no_sg_merge)
				goto new_segment;

			if (prev && cluster) {
				if (seg_size + bv.bv_len
				    > queue_max_segment_size(q))
					goto new_segment;
				if (!BIOVEC_PHYS_MERGEABLE(&bvprv, &bv))
					goto new_segment;
				if (!BIOVEC_SEG_BOUNDARY(q, &bvprv, &bv))
					goto new_segment;

				seg_size += bv.bv_len;
				bvprv = bv;
				continue;
			}
new_segment:
			if (nr_phys_segs == 1 && seg_size >
			    fbio->bi_seg_front_size)
				fbio->bi_seg_front_size = seg_size;

			nr_phys_segs++;
			bvprv = bv;
			prev = 1;
			seg_size = bv.bv_len;
		}
		bbio = bio;
	}

	if (nr_phys_segs == 1 && seg_size > fbio->bi_seg_front_size)
		fbio->bi_seg_front_size = seg_size;
	if (seg_size > bbio->bi_seg_back_size)
		bbio->bi_seg_back_size = seg_size;

	return nr_phys_segs;
}

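/* Recompute rq->nr_phys_segments after the request's bio chain has changed. */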
void blk_recalc_rq_segments(struct request *rq)
{
	bool no_sg_merge = !!test_bit(QUEUE_FLAG_NO_SG_MERGE,
			&rq->q->queue_flags);

	rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio,
			no_sg_merge);
}

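/*
 * (Re)compute bio->bi_phys_segments and mark it valid.  When SG merging is
 * disabled and the bvec count fits the queue, the count is used directly;
 * otherwise the bio is temporarily detached from its chain and recounted
 * segment by segment.
 */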
void blk_recount_segments(struct request_queue *q, struct bio *bio)
{
	unsigned short seg_cnt;

	/* estimate segment number by bi_vcnt for non-cloned bio */
	if (bio_flagged(bio, BIO_CLONED))
		seg_cnt = bio_segments(bio);
	else
		seg_cnt = bio->bi_vcnt;

	if (test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags) &&
			(seg_cnt < queue_max_segments(q)))
		bio->bi_phys_segments = seg_cnt;
	else {
		struct bio *nxt = bio->bi_next;

		bio->bi_next = NULL;
		bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio, false);
		bio->bi_next = nxt;
	}

	bio_set_flag(bio, BIO_SEG_VALID);
}
EXPORT_SYMBOL(blk_recount_segments);

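/*
 * Return 1 if the last segment of @bio and the first segment of @nxt can be
 * folded into a single physical segment without violating the queue's
 * segment size and boundary limits, 0 otherwise.
 */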
static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
				   struct bio *nxt)
{
	struct bio_vec end_bv = { NULL }, nxt_bv;
	struct bvec_iter iter;

	if (!blk_queue_cluster(q))
		return 0;

	if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
	    queue_max_segment_size(q))
		return 0;

	if (!bio_has_data(bio))
		return 1;

	bio_for_each_segment(end_bv, bio, iter)
		if (end_bv.bv_len == iter.bi_size)
			break;

	nxt_bv = bio_iovec(nxt);

	if (!BIOVEC_PHYS_MERGEABLE(&end_bv, &nxt_bv))
		return 0;

	/*
	 * bio and nxt are contiguous in memory; check if the queue allows
	 * these two to be merged into one
	 */
	if (BIOVEC_SEG_BOUNDARY(q, &end_bv, &nxt_bv))
		return 1;

	return 0;
}

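/*
 * Add one bvec to the scatterlist being built: extend the current
 * scatterlist entry when clustering allows it, otherwise start a new one.
 */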
static inline void
__blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
		     struct scatterlist *sglist, struct bio_vec *bvprv,
		     struct scatterlist **sg, int *nsegs, int *cluster)
{
	int nbytes = bvec->bv_len;

	if (*sg && *cluster) {
		if ((*sg)->length + nbytes > queue_max_segment_size(q))
			goto new_segment;

		if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
			goto new_segment;
		if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
			goto new_segment;

		(*sg)->length += nbytes;
	} else {
new_segment:
		if (!*sg)
			*sg = sglist;
		else {
			/*
			 * If the driver previously mapped a shorter
			 * list, we could see a termination bit
			 * prematurely unless it fully inits the sg
			 * table on each mapping. We KNOW that there
			 * must be more entries here or the driver
			 * would be buggy, so force clear the
			 * termination bit to avoid doing a full
			 * sg_init_table() in drivers for each command.
			 */
			sg_unmark_end(*sg);
			*sg = sg_next(*sg);
		}

		sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset);
		(*nsegs)++;
	}
	*bvprv = *bvec;
}

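/*
 * Map the bios of a request to scatterlist entries.  Discard and WRITE SAME
 * bios are special-cased to at most one payload segment; everything else is
 * fed through __blk_segment_map_sg() a bvec at a time.
 */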
static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
			     struct scatterlist *sglist,
			     struct scatterlist **sg)
{
	struct bio_vec bvec, bvprv = { NULL };
	struct bvec_iter iter;
	int nsegs, cluster;

	nsegs = 0;
	cluster = blk_queue_cluster(q);

	if (bio->bi_rw & REQ_DISCARD) {
		/*
		 * This is a hack - drivers should be neither modifying the
		 * biovec, nor relying on bi_vcnt - but because of
		 * blk_add_request_payload(), a discard bio may or may not have
		 * a payload we need to set up here (thank you Christoph) and
		 * bi_vcnt is really the only way of telling if we need to.
		 */

		if (bio->bi_vcnt)
			goto single_segment;

		return 0;
	}

	if (bio->bi_rw & REQ_WRITE_SAME) {
single_segment:
		*sg = sglist;
		bvec = bio_iovec(bio);
		sg_set_page(*sg, bvec.bv_page, bvec.bv_len, bvec.bv_offset);
		return 1;
	}

	for_each_bio(bio)
		bio_for_each_segment(bvec, bio, iter)
			__blk_segment_map_sg(q, &bvec, sglist, &bvprv, sg,
					     &nsegs, &cluster);

	return nsegs;
}

/*
 * map a request to scatterlist, return number of sg entries setup. Caller
 * must make sure sg can hold rq->nr_phys_segments entries
 */
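/*
 * A typical driver call sequence looks roughly like the sketch below; the
 * names "sgtbl", "MY_MAX_SEGS" and "hwdev" are hypothetical and not part of
 * this file:
 *
 *	struct scatterlist sgtbl[MY_MAX_SEGS];
 *	int nents;
 *
 *	sg_init_table(sgtbl, MY_MAX_SEGS);
 *	nents = blk_rq_map_sg(q, rq, sgtbl);
 *	nents = dma_map_sg(hwdev, sgtbl, nents,
 *			   rq_data_dir(rq) == WRITE ? DMA_TO_DEVICE
 *						    : DMA_FROM_DEVICE);
 */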
int blk_rq_map_sg(struct request_queue *q, struct request *rq,
		  struct scatterlist *sglist)
{
	struct scatterlist *sg = NULL;
	int nsegs = 0;

	if (rq->bio)
		nsegs = __blk_bios_map_sg(q, rq->bio, sglist, &sg);

	if (unlikely(rq->cmd_flags & REQ_COPY_USER) &&
	    (blk_rq_bytes(rq) & q->dma_pad_mask)) {
		unsigned int pad_len =
			(q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;

		sg->length += pad_len;
		rq->extra_len += pad_len;
	}

	if (q->dma_drain_size && q->dma_drain_needed(rq)) {
		if (rq->cmd_flags & REQ_WRITE)
			memset(q->dma_drain_buffer, 0, q->dma_drain_size);

		sg->page_link &= ~0x02;
		sg = sg_next(sg);
		sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
			    q->dma_drain_size,
			    ((unsigned long)q->dma_drain_buffer) &
			    (PAGE_SIZE - 1));
		nsegs++;
		rq->extra_len += q->dma_drain_size;
	}

	if (sg)
		sg_mark_end(sg);

	return nsegs;
}
EXPORT_SYMBOL(blk_rq_map_sg);

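/*
 * Account for a bio that starts a new hardware segment on @req; refuse the
 * merge if the combined segment count would exceed the queue limit or the
 * integrity payloads cannot be merged.
 */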
static inline int ll_new_hw_segment(struct request_queue *q,
				    struct request *req,
				    struct bio *bio)
{
	int nr_phys_segs = bio_phys_segments(q, bio);

	if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(q))
		goto no_merge;

	if (blk_integrity_merge_bio(q, req, bio) == false)
		goto no_merge;

	/*
	 * This will form the start of a new hw segment.  Bump both
	 * counters.
	 */
	req->nr_phys_segments += nr_phys_segs;
	return 1;

no_merge:
	req->cmd_flags |= REQ_NOMERGE;
	if (req == q->last_merge)
		q->last_merge = NULL;
	return 0;
}

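/*
 * Check whether @bio may be appended to the back of @req without exceeding
 * the request size limit, and update the segment accounting if so.
 */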
int ll_back_merge_fn(struct request_queue *q, struct request *req,
		     struct bio *bio)
{
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req)) {
		req->cmd_flags |= REQ_NOMERGE;
		if (req == q->last_merge)
			q->last_merge = NULL;
		return 0;
	}
	if (!bio_flagged(req->biotail, BIO_SEG_VALID))
		blk_recount_segments(q, req->biotail);
	if (!bio_flagged(bio, BIO_SEG_VALID))
		blk_recount_segments(q, bio);

	return ll_new_hw_segment(q, req, bio);
}

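/*
 * Check whether @bio may be merged at the front of @req without exceeding
 * the request size limit, and update the segment accounting if so.
 */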
int ll_front_merge_fn(struct request_queue *q, struct request *req,
		      struct bio *bio)
{
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req)) {
		req->cmd_flags |= REQ_NOMERGE;
		if (req == q->last_merge)
			q->last_merge = NULL;
		return 0;
	}
	if (!bio_flagged(bio, BIO_SEG_VALID))
		blk_recount_segments(q, bio);
	if (!bio_flagged(req->bio, BIO_SEG_VALID))
		blk_recount_segments(q, req->bio);

	return ll_new_hw_segment(q, req, bio);
}

/*
 * blk-mq uses req->special to carry normal driver per-request payload, it
 * does not indicate a prepared command that we cannot merge with.
 */
static bool req_no_special_merge(struct request *req)
{
	struct request_queue *q = req->q;

	return !q->mq_ops && req->special;
}

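/*
 * Return non-zero if appending @next to @req would leave an SG gap between
 * the last bvec of @req and the first bvec of @next.
 */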
static int req_gap_to_prev(struct request *req, struct request *next)
{
	struct bio *prev = req->biotail;

	return bvec_gap_to_prev(&prev->bi_io_vec[prev->bi_vcnt - 1],
				next->bio->bi_io_vec[0].bv_offset);
}

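/*
 * Decide whether @next may be merged onto the tail of @req: re-queued
 * requests, SG gaps, oversized results, too many physical segments or
 * unmergeable integrity payloads all veto the merge.  On success the
 * combined segment count is stored in @req.
 */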
static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
				struct request *next)
{
	int total_phys_segments;
	unsigned int seg_size =
		req->biotail->bi_seg_back_size + next->bio->bi_seg_front_size;

	/*
	 * First check if either of the requests is a re-queued
	 * request.  We can't merge re-queued requests.
	 */
	if (req_no_special_merge(req) || req_no_special_merge(next))
		return 0;

	if (test_bit(QUEUE_FLAG_SG_GAPS, &q->queue_flags) &&
	    req_gap_to_prev(req, next))
		return 0;

	/*
	 * Will it become too large?
	 */
	if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
	    blk_rq_get_max_sectors(req))
		return 0;

	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
	if (blk_phys_contig_segment(q, req->biotail, next->bio)) {
		if (req->nr_phys_segments == 1)
			req->bio->bi_seg_front_size = seg_size;
		if (next->nr_phys_segments == 1)
			next->biotail->bi_seg_back_size = seg_size;
		total_phys_segments--;
	}

	if (total_phys_segments > queue_max_segments(q))
		return 0;

	if (blk_integrity_merge_rq(q, req, next) == false)
		return 0;

	/* Merge is OK... */
	req->nr_phys_segments = total_phys_segments;
	return 1;
}

/**
 * blk_rq_set_mixed_merge - mark a request as mixed merge
 * @rq: request to mark as mixed merge
 *
 * Description:
 *     @rq is about to be mixed merged.  Make sure the attributes
 *     which can be mixed are set in each bio and mark @rq as mixed
 *     merged.
 */
void blk_rq_set_mixed_merge(struct request *rq)
{
	unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
	struct bio *bio;

	if (rq->cmd_flags & REQ_MIXED_MERGE)
		return;

	/*
	 * @rq will no longer represent mixable attributes for all the
	 * contained bios.  It will just track those of the first one.
	 * Distribute the attributes to each bio.
	 */
	for (bio = rq->bio; bio; bio = bio->bi_next) {
		WARN_ON_ONCE((bio->bi_rw & REQ_FAILFAST_MASK) &&
			     (bio->bi_rw & REQ_FAILFAST_MASK) != ff);
		bio->bi_rw |= ff;
	}
	rq->cmd_flags |= REQ_MIXED_MERGE;
}

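/*
 * A merged-away request never completes on its own, so round off the
 * partition statistics and drop its in-flight count here.
 */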
static void blk_account_io_merge(struct request *req)
{
	if (blk_do_io_stat(req)) {
		struct hd_struct *part;
		int cpu;

		cpu = part_stat_lock();
		part = req->part;

		part_round_stats(cpu, part);
		part_dec_in_flight(part, rq_data_dir(req));

		hd_struct_put(part);
		part_stat_unlock();
	}
}

/*
 * Has to be called with the request spinlock acquired
 */
static int attempt_merge(struct request_queue *q, struct request *req,
			  struct request *next)
{
	if (!rq_mergeable(req) || !rq_mergeable(next))
		return 0;

	if (!blk_check_merge_flags(req->cmd_flags, next->cmd_flags))
		return 0;

	/*
	 * not contiguous
	 */
	if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next))
		return 0;

	if (rq_data_dir(req) != rq_data_dir(next)
	    || req->rq_disk != next->rq_disk
	    || req_no_special_merge(next))
		return 0;

	if (req->cmd_flags & REQ_WRITE_SAME &&
	    !blk_write_same_mergeable(req->bio, next->bio))
		return 0;

	/*
	 * If we are allowed to merge, then append bio list
	 * from next to rq and release next. merge_requests_fn
	 * will have updated segment counts, update sector
	 * counts here.
	 */
	if (!ll_merge_requests_fn(q, req, next))
		return 0;

	/*
	 * If failfast settings disagree or any of the two is already
	 * a mixed merge, mark both as mixed before proceeding.  This
	 * makes sure that all involved bios have mixable attributes
	 * set properly.
	 */
	if ((req->cmd_flags | next->cmd_flags) & REQ_MIXED_MERGE ||
	    (req->cmd_flags & REQ_FAILFAST_MASK) !=
	    (next->cmd_flags & REQ_FAILFAST_MASK)) {
		blk_rq_set_mixed_merge(req);
		blk_rq_set_mixed_merge(next);
	}

	/*
	 * At this point we have either done a back merge
	 * or front merge. We need the smaller start_time of
	 * the merged requests to be the current request
	 * for accounting purposes.
	 */
	if (time_after(req->start_time, next->start_time))
		req->start_time = next->start_time;

	req->biotail->bi_next = next->bio;
	req->biotail = next->biotail;

	req->__data_len += blk_rq_bytes(next);

	elv_merge_requests(q, req, next);

	/*
	 * 'next' is going away, so update stats accordingly
	 */
	blk_account_io_merge(next);

	req->ioprio = ioprio_best(req->ioprio, next->ioprio);
	if (blk_rq_cpu_valid(next))
		req->cpu = next->cpu;

	/* ownership of bio passed from next to req */
	next->bio = NULL;
	__blk_put_request(q, next);
	return 1;
}

int attempt_back_merge(struct request_queue *q, struct request *rq)
{
	struct request *next = elv_latter_request(q, rq);

	if (next)
		return attempt_merge(q, rq, next);

	return 0;
}

int attempt_front_merge(struct request_queue *q, struct request *rq)
{
	struct request *prev = elv_former_request(q, rq);

	if (prev)
		return attempt_merge(q, prev, rq);

	return 0;
}

int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
			  struct request *next)
{
	return attempt_merge(q, rq, next);
}

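/*
 * Check whether @bio is allowed to be merged into @rq at all: merge flags,
 * data direction, device, integrity payload, WRITE SAME buffer and SG gap
 * constraints must all agree.  Front/back placement is decided separately
 * by blk_try_merge().
 */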
bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
{
	struct request_queue *q = rq->q;

	if (!rq_mergeable(rq) || !bio_mergeable(bio))
		return false;

	if (!blk_check_merge_flags(rq->cmd_flags, bio->bi_rw))
		return false;

	/* different data direction or already started, don't merge */
	if (bio_data_dir(bio) != rq_data_dir(rq))
		return false;

	/* must be same device and not a special request */
	if (rq->rq_disk != bio->bi_bdev->bd_disk || req_no_special_merge(rq))
		return false;

	/* only merge integrity protected bio into ditto rq */
	if (blk_integrity_merge_bio(rq->q, rq, bio) == false)
		return false;

	/* must be using the same buffer */
	if (rq->cmd_flags & REQ_WRITE_SAME &&
	    !blk_write_same_mergeable(rq->bio, bio))
		return false;

	/* Only check gaps if the bio carries data */
	if (q->queue_flags & (1 << QUEUE_FLAG_SG_GAPS) && bio_has_data(bio)) {
		struct bio_vec *bprev;

		bprev = &rq->biotail->bi_io_vec[rq->biotail->bi_vcnt - 1];
		if (bvec_gap_to_prev(bprev, bio->bi_io_vec[0].bv_offset))
			return false;
	}

	return true;
}

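/*
 * Given that a merge is allowed, report whether @bio lines up with the end
 * of @rq (back merge), with its start (front merge), or with neither.
 */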
int blk_try_merge(struct request *rq, struct bio *bio)
{
	if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
		return ELEVATOR_BACK_MERGE;
	else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
		return ELEVATOR_FRONT_MERGE;
	return ELEVATOR_NO_MERGE;
}