/*
 * raid1.c : Multiple Devices driver for Linux
 *
 * Copyright (C) 1999, 2000, 2001 Ingo Molnar, Red Hat
 *
 * Copyright (C) 1996, 1997, 1998 Ingo Molnar, Miguel de Icaza, Gadi Oxman
 *
 * RAID-1 management functions.
 *
 * Better read-balancing code written by Mika Kuoppala <miku@iki.fi>, 2000
 *
 * Fixes to reconstruction by Jakob Østergaard <jakob@ostenfeld.dk>
 * Various fixes by Neil Brown <neilb@cse.unsw.edu.au>
 *
 * Changes by Peter T. Breuer <ptb@it.uc3m.es> 31/1/2003 to support
 * bitmapped intelligence in resync:
 *
 *      - bitmap marked during normal i/o
 *      - bitmap used to skip nondirty blocks during sync
 *
 * Additions to bitmap code, (C) 2003-2004 Paul Clements, SteelEye Technology:
 * - persistent bitmap code
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * You should have received a copy of the GNU General Public License
 * (for example /usr/src/linux/COPYING); if not, write to the Free
 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/ratelimit.h>

#include <trace/events/block.h>

#include "md.h"
#include "raid1.h"
#include "md-bitmap.h"

#define UNSUPPORTED_MDDEV_FLAGS		\
	((1L << MD_HAS_JOURNAL) |	\
	 (1L << MD_JOURNAL_CLEAN) |	\
	 (1L << MD_HAS_PPL) |		\
	 (1L << MD_HAS_MULTIPLE_PPLS))

/*
 * Number of guaranteed r1bios in case of extreme VM load:
 */
#define	NR_RAID1_BIOS 256

/* when we get a read error on a read-only array, we redirect to another
 * device without failing the first device, or trying to over-write to
 * correct the read error.  To keep track of bad blocks on a per-bio
 * level, we store IO_BLOCKED in the appropriate 'bios' pointer
 */
#define IO_BLOCKED ((struct bio *)1)
/* When we successfully write to a known bad-block, we need to remove the
 * bad-block marking which must be done from process context.  So we record
 * the success by setting devs[n].bio to IO_MADE_GOOD
 */
#define IO_MADE_GOOD ((struct bio *)2)

#define BIO_SPECIAL(bio) ((unsigned long)bio <= 2)
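
/*
 * Editorial note, not part of the original source: IO_BLOCKED and
 * IO_MADE_GOOD are sentinel values stored in r1_bio->bios[] slots that
 * normally hold real bio pointers.  Because no valid kernel pointer is
 * <= 2, a slot can be tested cheaply, e.g.:
 *
 *	struct bio *b = r1_bio->bios[i];
 *	if (b == IO_BLOCKED)		// read redirected, device kept
 *		...
 *	else if (b == IO_MADE_GOOD)	// write fixed a known bad block
 *		...
 *	else if (!BIO_SPECIAL(b))	// a real bio that must be put
 *		bio_put(b);
 *
 * BIO_SPECIAL() is also true for NULL, which is why put_all_bios() only
 * calls bio_put() when this test fails.
 */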

/* When there are this many requests queued to be written by
 * the raid1 thread, we become 'congested' to provide back-pressure
 * for writeback.
 */
static int max_queued_requests = 1024;

static void allow_barrier(struct r1conf *conf, sector_t sector_nr);
static void lower_barrier(struct r1conf *conf, sector_t sector_nr);

#define raid1_log(md, fmt, args...)				\
	do { if ((md)->queue) blk_add_trace_msg((md)->queue, "raid1 " fmt, ##args); } while (0)

#include "raid1-10.c"

/*
 * for resync bio, r1bio pointer can be retrieved from the per-bio
 * 'struct resync_pages'.
 */
static inline struct r1bio *get_resync_r1bio(struct bio *bio)
{
	return get_resync_pages(bio)->raid_bio;
}

static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data)
{
	struct pool_info *pi = data;
	int size = offsetof(struct r1bio, bios[pi->raid_disks]);

	/* allocate a r1bio with room for raid_disks entries in the bios array */
	return kzalloc(size, gfp_flags);
}

static void r1bio_pool_free(void *r1_bio, void *data)
{
	kfree(r1_bio);
}

#define RESYNC_DEPTH 32
#define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9)
#define RESYNC_WINDOW (RESYNC_BLOCK_SIZE * RESYNC_DEPTH)
#define RESYNC_WINDOW_SECTORS (RESYNC_WINDOW >> 9)
#define CLUSTER_RESYNC_WINDOW (16 * RESYNC_WINDOW)
#define CLUSTER_RESYNC_WINDOW_SECTORS (CLUSTER_RESYNC_WINDOW >> 9)

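/*
 * Editorial note, not part of the original source: assuming
 * RESYNC_BLOCK_SIZE is 64KiB (as defined in raid1-10.c), these work out to:
 *
 *	RESYNC_SECTORS			= 65536 >> 9         = 128 sectors
 *	RESYNC_WINDOW			= 65536 * 32         = 2 MiB
 *	RESYNC_WINDOW_SECTORS		= RESYNC_WINDOW >> 9 = 4096 sectors
 *	CLUSTER_RESYNC_WINDOW		= 16 * 2 MiB         = 32 MiB
 *	CLUSTER_RESYNC_WINDOW_SECTORS	= 65536 sectors
 *
 * i.e. at most RESYNC_DEPTH (32) resync requests of up to
 * RESYNC_BLOCK_SIZE each are kept in flight per barrier bucket.
 */
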
static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
{
	struct pool_info *pi = data;
	struct r1bio *r1_bio;
	struct bio *bio;
	int need_pages;
	int j;
	struct resync_pages *rps;

	r1_bio = r1bio_pool_alloc(gfp_flags, pi);
	if (!r1_bio)
		return NULL;

	rps = kmalloc_array(pi->raid_disks, sizeof(struct resync_pages),
			    gfp_flags);
	if (!rps)
		goto out_free_r1bio;

	/*
	 * Allocate bios : 1 for reading, n-1 for writing
	 */
	for (j = pi->raid_disks ; j-- ; ) {
		bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
		if (!bio)
			goto out_free_bio;
		r1_bio->bios[j] = bio;
	}
	/*
	 * Allocate RESYNC_PAGES data pages and attach them to
	 * the first bio.
	 * If this is a user-requested check/repair, allocate
	 * RESYNC_PAGES for each bio.
	 */
	if (test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery))
		need_pages = pi->raid_disks;
	else
		need_pages = 1;
	for (j = 0; j < pi->raid_disks; j++) {
		struct resync_pages *rp = &rps[j];

		bio = r1_bio->bios[j];

		if (j < need_pages) {
			if (resync_alloc_pages(rp, gfp_flags))
				goto out_free_pages;
		} else {
			memcpy(rp, &rps[0], sizeof(*rp));
			resync_get_all_pages(rp);
		}

		rp->raid_bio = r1_bio;
		bio->bi_private = rp;
	}

	r1_bio->master_bio = NULL;

	return r1_bio;

out_free_pages:
	while (--j >= 0)
		resync_free_pages(&rps[j]);

out_free_bio:
	while (++j < pi->raid_disks)
		bio_put(r1_bio->bios[j]);
	kfree(rps);

out_free_r1bio:
	r1bio_pool_free(r1_bio, data);
	return NULL;
}

static void r1buf_pool_free(void *__r1_bio, void *data)
{
	struct pool_info *pi = data;
	int i;
	struct r1bio *r1bio = __r1_bio;
	struct resync_pages *rp = NULL;

	for (i = pi->raid_disks; i--; ) {
		rp = get_resync_pages(r1bio->bios[i]);
		resync_free_pages(rp);
		bio_put(r1bio->bios[i]);
	}

	/* resync pages array stored in the 1st bio's .bi_private */
	kfree(rp);

	r1bio_pool_free(r1bio, data);
}

static void put_all_bios(struct r1conf *conf, struct r1bio *r1_bio)
{
	int i;

	for (i = 0; i < conf->raid_disks * 2; i++) {
		struct bio **bio = r1_bio->bios + i;
		if (!BIO_SPECIAL(*bio))
			bio_put(*bio);
		*bio = NULL;
	}
}

static void free_r1bio(struct r1bio *r1_bio)
{
	struct r1conf *conf = r1_bio->mddev->private;

	put_all_bios(conf, r1_bio);
	mempool_free(r1_bio, &conf->r1bio_pool);
}

static void put_buf(struct r1bio *r1_bio)
{
	struct r1conf *conf = r1_bio->mddev->private;
	sector_t sect = r1_bio->sector;
	int i;

	for (i = 0; i < conf->raid_disks * 2; i++) {
		struct bio *bio = r1_bio->bios[i];
		if (bio->bi_end_io)
			rdev_dec_pending(conf->mirrors[i].rdev, r1_bio->mddev);
	}

	mempool_free(r1_bio, &conf->r1buf_pool);

	lower_barrier(conf, sect);
}

static void reschedule_retry(struct r1bio *r1_bio)
{
	unsigned long flags;
	struct mddev *mddev = r1_bio->mddev;
	struct r1conf *conf = mddev->private;
	int idx;

	idx = sector_to_idx(r1_bio->sector);
	spin_lock_irqsave(&conf->device_lock, flags);
	list_add(&r1_bio->retry_list, &conf->retry_list);
	atomic_inc(&conf->nr_queued[idx]);
	spin_unlock_irqrestore(&conf->device_lock, flags);

	wake_up(&conf->wait_barrier);
	md_wakeup_thread(mddev->thread);
}

/*
 * raid_end_bio_io() is called when we have finished servicing a mirrored
 * operation and are ready to return a success/failure code to the buffer
 * cache layer.
 */
static void call_bio_endio(struct r1bio *r1_bio)
{
	struct bio *bio = r1_bio->master_bio;
	struct r1conf *conf = r1_bio->mddev->private;

	if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
		bio->bi_status = BLK_STS_IOERR;

	bio_endio(bio);
	/*
	 * Wake up any possible resync thread that waits for the device
	 * to go idle.
	 */
	allow_barrier(conf, r1_bio->sector);
}

static void raid_end_bio_io(struct r1bio *r1_bio)
{
	struct bio *bio = r1_bio->master_bio;

	/* if nobody has done the final endio yet, do it now */
	if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
		pr_debug("raid1: sync end %s on sectors %llu-%llu\n",
			 (bio_data_dir(bio) == WRITE) ? "write" : "read",
			 (unsigned long long) bio->bi_iter.bi_sector,
			 (unsigned long long) bio_end_sector(bio) - 1);

		call_bio_endio(r1_bio);
	}
	free_r1bio(r1_bio);
}

/*
 * Update disk head position estimator based on IRQ completion info.
 */
static inline void update_head_pos(int disk, struct r1bio *r1_bio)
{
	struct r1conf *conf = r1_bio->mddev->private;

	conf->mirrors[disk].head_position =
		r1_bio->sector + (r1_bio->sectors);
}

/*
 * Find the disk number which triggered the given bio
 */
static int find_bio_disk(struct r1bio *r1_bio, struct bio *bio)
{
	int mirror;
	struct r1conf *conf = r1_bio->mddev->private;
	int raid_disks = conf->raid_disks;

	for (mirror = 0; mirror < raid_disks * 2; mirror++)
		if (r1_bio->bios[mirror] == bio)
			break;

	BUG_ON(mirror == raid_disks * 2);
	update_head_pos(mirror, r1_bio);

	return mirror;
}

static void raid1_end_read_request(struct bio *bio)
{
	int uptodate = !bio->bi_status;
	struct r1bio *r1_bio = bio->bi_private;
	struct r1conf *conf = r1_bio->mddev->private;
	struct md_rdev *rdev = conf->mirrors[r1_bio->read_disk].rdev;

	/*
	 * this branch is our 'one mirror IO has finished' event handler:
	 */
	update_head_pos(r1_bio->read_disk, r1_bio);

	if (uptodate)
		set_bit(R1BIO_Uptodate, &r1_bio->state);
	else if (test_bit(FailFast, &rdev->flags) &&
		 test_bit(R1BIO_FailFast, &r1_bio->state))
		/* This was a fail-fast read so we definitely
		 * want to retry */
		;
	else {
		/* If all other devices have failed, we want to return
		 * the error upwards rather than fail the last device.
		 * Here we redefine "uptodate" to mean "Don't want to retry"
		 */
		unsigned long flags;
		spin_lock_irqsave(&conf->device_lock, flags);
		if (r1_bio->mddev->degraded == conf->raid_disks ||
		    (r1_bio->mddev->degraded == conf->raid_disks-1 &&
		     test_bit(In_sync, &rdev->flags)))
			uptodate = 1;
		spin_unlock_irqrestore(&conf->device_lock, flags);
	}

	if (uptodate) {
		raid_end_bio_io(r1_bio);
		rdev_dec_pending(rdev, conf->mddev);
	} else {
		/*
		 * oops, read error:
		 */
		char b[BDEVNAME_SIZE];
		pr_err_ratelimited("md/raid1:%s: %s: rescheduling sector %llu\n",
				   mdname(conf->mddev),
				   bdevname(rdev->bdev, b),
				   (unsigned long long)r1_bio->sector);
		set_bit(R1BIO_ReadError, &r1_bio->state);
		reschedule_retry(r1_bio);
		/* don't drop the reference on read_disk yet */
	}
}

static void close_write(struct r1bio *r1_bio)
{
	/* it really is the end of this request */
	if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
		bio_free_pages(r1_bio->behind_master_bio);
		bio_put(r1_bio->behind_master_bio);
		r1_bio->behind_master_bio = NULL;
	}
	/* clear the bitmap if all writes complete successfully */
	md_bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector,
			   r1_bio->sectors,
			   !test_bit(R1BIO_Degraded, &r1_bio->state),
			   test_bit(R1BIO_BehindIO, &r1_bio->state));
	md_write_end(r1_bio->mddev);
}

static void r1_bio_write_done(struct r1bio *r1_bio)
{
	if (!atomic_dec_and_test(&r1_bio->remaining))
		return;

	if (test_bit(R1BIO_WriteError, &r1_bio->state))
		reschedule_retry(r1_bio);
	else {
		close_write(r1_bio);
		if (test_bit(R1BIO_MadeGood, &r1_bio->state))
			reschedule_retry(r1_bio);
		else
			raid_end_bio_io(r1_bio);
	}
}

static void raid1_end_write_request(struct bio *bio)
{
	struct r1bio *r1_bio = bio->bi_private;
	int behind = test_bit(R1BIO_BehindIO, &r1_bio->state);
	struct r1conf *conf = r1_bio->mddev->private;
	struct bio *to_put = NULL;
	int mirror = find_bio_disk(r1_bio, bio);
	struct md_rdev *rdev = conf->mirrors[mirror].rdev;
	bool discard_error;

	discard_error = bio->bi_status && bio_op(bio) == REQ_OP_DISCARD;

	/*
	 * 'one mirror IO has finished' event handler:
	 */
	if (bio->bi_status && !discard_error) {
		set_bit(WriteErrorSeen,	&rdev->flags);
		if (!test_and_set_bit(WantReplacement, &rdev->flags))
			set_bit(MD_RECOVERY_NEEDED, &
				conf->mddev->recovery);

		if (test_bit(FailFast, &rdev->flags) &&
		    (bio->bi_opf & MD_FAILFAST) &&
		    /* We never try FailFast to WriteMostly devices */
		    !test_bit(WriteMostly, &rdev->flags)) {
			md_error(r1_bio->mddev, rdev);
			if (!test_bit(Faulty, &rdev->flags))
				/* This is the only remaining device,
				 * we need to retry the write without
				 * FailFast
				 */
				set_bit(R1BIO_WriteError, &r1_bio->state);
			else {
				/* Finished with this branch */
				r1_bio->bios[mirror] = NULL;
				to_put = bio;
			}
		} else
			set_bit(R1BIO_WriteError, &r1_bio->state);
	} else {
		/*
		 * Set R1BIO_Uptodate in our master bio, so that we
		 * will return a good error code to the higher
		 * levels even if IO on some other mirrored buffer
		 * fails.
		 *
		 * The 'master' represents the composite IO operation
		 * to user-side. So if something waits for IO, then it
		 * will wait for the 'master' bio.
		 */
		sector_t first_bad;
		int bad_sectors;

		r1_bio->bios[mirror] = NULL;
		to_put = bio;
		/*
		 * Do not set R1BIO_Uptodate if the current device is
		 * rebuilding or Faulty. This is because we cannot use
		 * such a device for properly reading the data back (we could
		 * potentially use it, if the current write would have fallen
		 * before rdev->recovery_offset, but for simplicity we don't
		 * check this here).
		 */
		if (test_bit(In_sync, &rdev->flags) &&
		    !test_bit(Faulty, &rdev->flags))
			set_bit(R1BIO_Uptodate, &r1_bio->state);

		/* Maybe we can clear some bad blocks. */
		if (is_badblock(rdev, r1_bio->sector, r1_bio->sectors,
				&first_bad, &bad_sectors) && !discard_error) {
			r1_bio->bios[mirror] = IO_MADE_GOOD;
			set_bit(R1BIO_MadeGood, &r1_bio->state);
		}
	}

	if (behind) {
		if (test_bit(WriteMostly, &rdev->flags))
			atomic_dec(&r1_bio->behind_remaining);

		/*
		 * In behind mode, we ACK the master bio once the I/O
		 * has safely reached all non-writemostly
		 * disks. Setting the Returned bit ensures that this
		 * gets done only once -- we don't ever want to return
		 * -EIO here, instead we'll wait
		 */
		if (atomic_read(&r1_bio->behind_remaining) >= (atomic_read(&r1_bio->remaining)-1) &&
		    test_bit(R1BIO_Uptodate, &r1_bio->state)) {
			/* Maybe we can return now */
			if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
				struct bio *mbio = r1_bio->master_bio;
				pr_debug("raid1: behind end write sectors"
					 " %llu-%llu\n",
					 (unsigned long long) mbio->bi_iter.bi_sector,
					 (unsigned long long) bio_end_sector(mbio) - 1);
				call_bio_endio(r1_bio);
			}
		}
	}
	if (r1_bio->bios[mirror] == NULL)
		rdev_dec_pending(rdev, conf->mddev);

	/*
	 * Let's see if all mirrored write operations have finished
	 * already.
	 */
	r1_bio_write_done(r1_bio);

	if (to_put)
		bio_put(to_put);
}

static sector_t align_to_barrier_unit_end(sector_t start_sector,
					  sector_t sectors)
{
	sector_t len;

	WARN_ON(sectors == 0);
	/*
	 * len is the number of sectors from start_sector to end of the
	 * barrier unit which start_sector belongs to.
	 */
	len = round_up(start_sector + 1, BARRIER_UNIT_SECTOR_SIZE) -
	      start_sector;

	if (len > sectors)
		len = sectors;

	return len;
}
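
/*
 * Editorial note, not part of the original source: a worked example,
 * assuming BARRIER_UNIT_SECTOR_SIZE is 1 << 17 sectors (64 MiB) as
 * defined in raid1.h.  For start_sector = 130000 and sectors = 4096:
 *
 *	round_up(130001, 131072) = 131072
 *	len = 131072 - 130000    = 1072
 *
 * Since 1072 < 4096, the request is clipped to 1072 sectors, so every
 * r1_bio stays inside a single barrier unit (and hence a single
 * barrier bucket).
 */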

/*
 * This routine returns the disk from which the requested read should
 * be done. There is a per-array 'next expected sequential IO' sector
 * number - if this matches on the next IO then we use the last disk.
 * There is also a per-disk 'last known head position' sector that is
 * maintained from IRQ contexts; both the normal and the resync IO
 * completion handlers update this position correctly. If there is no
 * perfect sequential match then we pick the disk whose head is closest.
 *
 * If there are 2 mirrors in the same 2 devices, performance degrades
 * because position is mirror, not device based.
 *
 * The rdev for the device selected will have nr_pending incremented.
 */
static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sectors)
{
	const sector_t this_sector = r1_bio->sector;
	int sectors;
	int best_good_sectors;
	int best_disk, best_dist_disk, best_pending_disk;
	int has_nonrot_disk;
	int disk;
	sector_t best_dist;
	unsigned int min_pending;
	struct md_rdev *rdev;
	int choose_first;
	int choose_next_idle;

	rcu_read_lock();
	/*
	 * Check if we can balance. We can balance on the whole
	 * device if no resync is going on, or below the resync window.
	 * We take the first readable disk when above the resync window.
	 */
 retry:
	sectors = r1_bio->sectors;
	best_disk = -1;
	best_dist_disk = -1;
	best_dist = MaxSector;
	best_pending_disk = -1;
	min_pending = UINT_MAX;
	best_good_sectors = 0;
	has_nonrot_disk = 0;
	choose_next_idle = 0;
	clear_bit(R1BIO_FailFast, &r1_bio->state);

	if ((conf->mddev->recovery_cp < this_sector + sectors) ||
	    (mddev_is_clustered(conf->mddev) &&
	    md_cluster_ops->area_resyncing(conf->mddev, READ, this_sector,
		    this_sector + sectors)))
		choose_first = 1;
	else
		choose_first = 0;

	for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) {
		sector_t dist;
		sector_t first_bad;
		int bad_sectors;
		unsigned int pending;
		bool nonrot;

		rdev = rcu_dereference(conf->mirrors[disk].rdev);
		if (r1_bio->bios[disk] == IO_BLOCKED
		    || rdev == NULL
		    || test_bit(Faulty, &rdev->flags))
			continue;
		if (!test_bit(In_sync, &rdev->flags) &&
		    rdev->recovery_offset < this_sector + sectors)
			continue;
		if (test_bit(WriteMostly, &rdev->flags)) {
			/* Don't balance among write-mostly, just
			 * use the first as a last resort */
			if (best_dist_disk < 0) {
				if (is_badblock(rdev, this_sector, sectors,
						&first_bad, &bad_sectors)) {
					if (first_bad <= this_sector)
						/* Cannot use this */
						continue;
					best_good_sectors = first_bad - this_sector;
				} else
					best_good_sectors = sectors;
				best_dist_disk = disk;
				best_pending_disk = disk;
			}
			continue;
		}
		/* This is a reasonable device to use.  It might
		 * even be best.
		 */
		if (is_badblock(rdev, this_sector, sectors,
				&first_bad, &bad_sectors)) {
			if (best_dist < MaxSector)
				/* already have a better device */
				continue;
			if (first_bad <= this_sector) {
				/* cannot read here. If this is the 'primary'
				 * device, then we must not read beyond
				 * bad_sectors from another device..
				 */
				bad_sectors -= (this_sector - first_bad);
				if (choose_first && sectors > bad_sectors)
					sectors = bad_sectors;
				if (best_good_sectors > sectors)
					best_good_sectors = sectors;

			} else {
				sector_t good_sectors = first_bad - this_sector;
				if (good_sectors > best_good_sectors) {
					best_good_sectors = good_sectors;
					best_disk = disk;
				}
				if (choose_first)
					break;
			}
			continue;
		} else {
			if ((sectors > best_good_sectors) && (best_disk >= 0))
				best_disk = -1;
			best_good_sectors = sectors;
		}

		if (best_disk >= 0)
			/* At least two disks to choose from so failfast is OK */
			set_bit(R1BIO_FailFast, &r1_bio->state);

		nonrot = blk_queue_nonrot(bdev_get_queue(rdev->bdev));
		has_nonrot_disk |= nonrot;
		pending = atomic_read(&rdev->nr_pending);
		dist = abs(this_sector - conf->mirrors[disk].head_position);
		if (choose_first) {
			best_disk = disk;
			break;
		}
		/* Don't change to another disk for sequential reads */
		if (conf->mirrors[disk].next_seq_sect == this_sector
		    || dist == 0) {
			int opt_iosize = bdev_io_opt(rdev->bdev) >> 9;
			struct raid1_info *mirror = &conf->mirrors[disk];

			best_disk = disk;
			/*
			 * If the buffered sequential IO size exceeds the
			 * optimal iosize, check whether there is an idle disk
			 * and, if so, choose it. read_balance could already
			 * have chosen an idle disk before noticing that this
			 * is sequential IO on the current disk. That doesn't
			 * matter: the current disk simply stays idle and will
			 * be used again once the first disk's IO size exceeds
			 * the optimal iosize. This way the first disk gets at
			 * least an optimal-sized IO. The second disk's IO may
			 * be small, which is not a big deal since by the time
			 * it starts, the first disk is likely still busy.
			 */
			if (nonrot && opt_iosize > 0 &&
			    mirror->seq_start != MaxSector &&
			    mirror->next_seq_sect > opt_iosize &&
			    mirror->next_seq_sect - opt_iosize >=
			    mirror->seq_start) {
				choose_next_idle = 1;
				continue;
			}
			break;
		}

		if (choose_next_idle)
			continue;

		if (min_pending > pending) {
			min_pending = pending;
			best_pending_disk = disk;
		}

		if (dist < best_dist) {
			best_dist = dist;
			best_dist_disk = disk;
		}
	}

	/*
	 * If all disks are rotational, choose the closest disk. If any disk is
	 * non-rotational, choose the disk with the fewest pending requests even
	 * if it is rotational, which may or may not be optimal for arrays with
	 * mixed rotational/non-rotational disks depending on workload.
	 */
	if (best_disk == -1) {
		if (has_nonrot_disk || min_pending == 0)
			best_disk = best_pending_disk;
		else
			best_disk = best_dist_disk;
	}

	if (best_disk >= 0) {
		rdev = rcu_dereference(conf->mirrors[best_disk].rdev);
		if (!rdev)
			goto retry;
		atomic_inc(&rdev->nr_pending);
		sectors = best_good_sectors;

		if (conf->mirrors[best_disk].next_seq_sect != this_sector)
			conf->mirrors[best_disk].seq_start = this_sector;

		conf->mirrors[best_disk].next_seq_sect = this_sector + sectors;
	}
	rcu_read_unlock();
	*max_sectors = sectors;

	return best_disk;
}
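
/*
 * Editorial note, not part of the original source: in short, the
 * selection above prefers, in order, a disk already streaming this
 * sector sequentially, then (when a non-rotational disk is present or
 * some disk is idle) the disk with the fewest pending requests, and
 * otherwise the disk whose last known head position is closest -
 * always skipping Faulty, not-in-sync and bad-block-covered ranges,
 * and falling back to a write-mostly disk only as a last resort.
 */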

static int raid1_congested(struct mddev *mddev, int bits)
{
	struct r1conf *conf = mddev->private;
	int i, ret = 0;

	if ((bits & (1 << WB_async_congested)) &&
	    conf->pending_count >= max_queued_requests)
		return 1;

	rcu_read_lock();
	for (i = 0; i < conf->raid_disks * 2; i++) {
		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
		if (rdev && !test_bit(Faulty, &rdev->flags)) {
			struct request_queue *q = bdev_get_queue(rdev->bdev);

			BUG_ON(!q);

			/* Note the '|| 1' - when read_balance prefers
			 * non-congested targets, it can be removed
			 */
			if ((bits & (1 << WB_async_congested)) || 1)
				ret |= bdi_congested(q->backing_dev_info, bits);
			else
				ret &= bdi_congested(q->backing_dev_info, bits);
		}
	}
	rcu_read_unlock();
	return ret;
}

static void flush_bio_list(struct r1conf *conf, struct bio *bio)
{
	/* flush any pending bitmap writes to disk before proceeding w/ I/O */
	md_bitmap_unplug(conf->mddev->bitmap);
	wake_up(&conf->wait_barrier);

	while (bio) { /* submit pending writes */
		struct bio *next = bio->bi_next;
		struct md_rdev *rdev = (void *)bio->bi_disk;
		bio->bi_next = NULL;
		bio_set_dev(bio, rdev->bdev);
		if (test_bit(Faulty, &rdev->flags)) {
			bio_io_error(bio);
		} else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
				    !blk_queue_discard(bio->bi_disk->queue)))
			/* Just ignore it */
			bio_endio(bio);
		else
			generic_make_request(bio);
		bio = next;
	}
}

static void flush_pending_writes(struct r1conf *conf)
{
	/* Any writes that have been queued but are awaiting
	 * bitmap updates get flushed here.
	 */
	spin_lock_irq(&conf->device_lock);

	if (conf->pending_bio_list.head) {
		struct blk_plug plug;
		struct bio *bio;

		bio = bio_list_get(&conf->pending_bio_list);
		conf->pending_count = 0;
		spin_unlock_irq(&conf->device_lock);

		/*
		 * As this is called in a wait_event() loop (see freeze_array),
		 * current->state might be TASK_UNINTERRUPTIBLE which will
		 * cause a warning when we prepare to wait again.  As it is
		 * rare that this path is taken, it is perfectly safe to force
		 * us to go around the wait_event() loop again, so the warning
		 * is a false-positive.  Silence the warning by resetting
		 * thread state
		 */
		__set_current_state(TASK_RUNNING);
		blk_start_plug(&plug);
		flush_bio_list(conf, bio);
		blk_finish_plug(&plug);
	} else
		spin_unlock_irq(&conf->device_lock);
}

/* Barriers....
 * Sometimes we need to suspend IO while we do something else,
 * either some resync/recovery, or reconfigure the array.
 * To do this we raise a 'barrier'.
 * The 'barrier' is a counter that can be raised multiple times
 * to count how many activities are happening which preclude
 * normal IO.
 * We can only raise the barrier if there is no pending IO.
 * i.e. if nr_pending == 0.
 * We choose only to raise the barrier if no-one is waiting for the
 * barrier to go down.  This means that as soon as an IO request
 * is ready, no other operations which require a barrier will start
 * until the IO request has had a chance.
 *
 * So: regular IO calls 'wait_barrier'.  When that returns there
 *    is no background IO happening.  It must arrange to call
 *    allow_barrier when it has finished its IO.
 * Background IO calls must call raise_barrier.  Once that returns
 *    there is no normal IO happening.  It must arrange to call
 *    lower_barrier when the particular background IO completes.
 */
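
/*
 * Editorial note, not part of the original source: the barrier is not
 * array-wide but per "barrier bucket" - sector_to_idx() (see raid1.h)
 * hashes the barrier unit a sector falls in to one of BARRIER_BUCKETS_NR
 * counters, so resync in one region only throttles regular I/O aimed at
 * the same bucket.  A sketch of the expected pairing, using the helpers
 * below:
 *
 *	// resync/recovery (background) path
 *	if (raise_barrier(conf, sector_nr))
 *		return;				// interrupted
 *	... issue resync I/O for this barrier unit ...
 *	lower_barrier(conf, sector_nr);		// here via put_buf()
 *
 *	// regular I/O path
 *	wait_barrier(conf, bio->bi_iter.bi_sector);
 *	... submit the mirrored I/O ...
 *	allow_barrier(conf, r1_bio->sector);	// here via call_bio_endio()
 */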
static sector_t raise_barrier(struct r1conf *conf, sector_t sector_nr)
{
	int idx = sector_to_idx(sector_nr);

	spin_lock_irq(&conf->resync_lock);

	/* Wait until no block IO is waiting */
	wait_event_lock_irq(conf->wait_barrier,
			    !atomic_read(&conf->nr_waiting[idx]),
			    conf->resync_lock);

	/* block any new IO from starting */
	atomic_inc(&conf->barrier[idx]);
	/*
	 * In raise_barrier() we firstly increase conf->barrier[idx] then
	 * check conf->nr_pending[idx]. In _wait_barrier() we firstly
	 * increase conf->nr_pending[idx] then check conf->barrier[idx].
	 * A memory barrier is needed here to make sure conf->nr_pending[idx]
	 * won't be fetched before conf->barrier[idx] is increased. Otherwise
	 * there will be a race between raise_barrier() and _wait_barrier().
	 */
	smp_mb__after_atomic();

	/* For these conditions we must wait:
	 * A: while the array is in frozen state
	 * B: while conf->nr_pending[idx] is not 0, meaning regular I/O
	 *    exists in the corresponding I/O barrier bucket.
	 * C: while conf->barrier[idx] >= RESYNC_DEPTH, meaning we have
	 *    reached the max resync count allowed on the current I/O
	 *    barrier bucket.
	 */
	wait_event_lock_irq(conf->wait_barrier,
			    (!conf->array_frozen &&
			     !atomic_read(&conf->nr_pending[idx]) &&
			     atomic_read(&conf->barrier[idx]) < RESYNC_DEPTH) ||
				test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery),
			    conf->resync_lock);

	if (test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) {
		atomic_dec(&conf->barrier[idx]);
		spin_unlock_irq(&conf->resync_lock);
		wake_up(&conf->wait_barrier);
		return -EINTR;
	}

	atomic_inc(&conf->nr_sync_pending);
	spin_unlock_irq(&conf->resync_lock);

	return 0;
}

static void lower_barrier(struct r1conf *conf, sector_t sector_nr)
{
	int idx = sector_to_idx(sector_nr);

	BUG_ON(atomic_read(&conf->barrier[idx]) <= 0);

	atomic_dec(&conf->barrier[idx]);
	atomic_dec(&conf->nr_sync_pending);
	wake_up(&conf->wait_barrier);
}

static void _wait_barrier(struct r1conf *conf, int idx)
{
	/*
	 * We need to increase conf->nr_pending[idx] very early here,
	 * then raise_barrier() can be blocked when it waits for
	 * conf->nr_pending[idx] to be 0. Then we can avoid holding
	 * conf->resync_lock when there is no barrier raised in same
	 * barrier unit bucket. Also if the array is frozen, I/O
	 * should be blocked until array is unfrozen.
	 */
	atomic_inc(&conf->nr_pending[idx]);
	/*
	 * In _wait_barrier() we firstly increase conf->nr_pending[idx], then
	 * check conf->barrier[idx]. In raise_barrier() we firstly increase
	 * conf->barrier[idx], then check conf->nr_pending[idx]. A memory
	 * barrier is necessary here to make sure conf->barrier[idx] won't be
	 * fetched before conf->nr_pending[idx] is increased. Otherwise there
	 * will be a race between _wait_barrier() and raise_barrier().
	 */
	smp_mb__after_atomic();

	/*
	 * Don't worry about checking two atomic_t variables at same time
	 * here. If, while we check conf->barrier[idx], the array is
	 * frozen (conf->array_frozen is 1) and conf->barrier[idx] is
	 * 0, it is safe to return and make the I/O continue. Because the
	 * array is frozen, all I/O returned here will eventually complete
	 * or be queued, no race will happen. See code comment in
	 * freeze_array().
	 */
	if (!READ_ONCE(conf->array_frozen) &&
	    !atomic_read(&conf->barrier[idx]))
		return;

	/*
	 * After holding conf->resync_lock, conf->nr_pending[idx]
	 * should be decreased before waiting for barrier to drop.
	 * Otherwise, we may encounter a race condition because
	 * raise_barrier() might be waiting for conf->nr_pending[idx]
	 * to be 0 at same time.
	 */
	spin_lock_irq(&conf->resync_lock);
	atomic_inc(&conf->nr_waiting[idx]);
	atomic_dec(&conf->nr_pending[idx]);
	/*
	 * In case freeze_array() is waiting for
	 * get_unqueued_pending() == extra
	 */
	wake_up(&conf->wait_barrier);
	/* Wait for the barrier in same barrier unit bucket to drop. */
	wait_event_lock_irq(conf->wait_barrier,
			    !conf->array_frozen &&
			     !atomic_read(&conf->barrier[idx]),
			    conf->resync_lock);
	atomic_inc(&conf->nr_pending[idx]);
	atomic_dec(&conf->nr_waiting[idx]);
	spin_unlock_irq(&conf->resync_lock);
}

static void wait_read_barrier(struct r1conf *conf, sector_t sector_nr)
{
	int idx = sector_to_idx(sector_nr);

	/*
	 * Very similar to _wait_barrier(). The difference is, for read
	 * I/O we don't need to wait for sync I/O, but if the whole array
	 * is frozen, the read I/O still has to wait until the array is
	 * unfrozen. Since there is no ordering requirement with
	 * conf->barrier[idx] here, a memory barrier is unnecessary as well.
	 */
	atomic_inc(&conf->nr_pending[idx]);

	if (!READ_ONCE(conf->array_frozen))
		return;

	spin_lock_irq(&conf->resync_lock);
	atomic_inc(&conf->nr_waiting[idx]);
	atomic_dec(&conf->nr_pending[idx]);
	/*
	 * In case freeze_array() is waiting for
	 * get_unqueued_pending() == extra
	 */
	wake_up(&conf->wait_barrier);
	/* Wait for array to be unfrozen */
	wait_event_lock_irq(conf->wait_barrier,
			    !conf->array_frozen,
			    conf->resync_lock);
	atomic_inc(&conf->nr_pending[idx]);
	atomic_dec(&conf->nr_waiting[idx]);
	spin_unlock_irq(&conf->resync_lock);
}

static void wait_barrier(struct r1conf *conf, sector_t sector_nr)
{
	int idx = sector_to_idx(sector_nr);

	_wait_barrier(conf, idx);
}

static void _allow_barrier(struct r1conf *conf, int idx)
{
	atomic_dec(&conf->nr_pending[idx]);
	wake_up(&conf->wait_barrier);
}

static void allow_barrier(struct r1conf *conf, sector_t sector_nr)
{
	int idx = sector_to_idx(sector_nr);

	_allow_barrier(conf, idx);
}

/* conf->resync_lock should be held */
static int get_unqueued_pending(struct r1conf *conf)
{
	int idx, ret;

	ret = atomic_read(&conf->nr_sync_pending);
	for (idx = 0; idx < BARRIER_BUCKETS_NR; idx++)
		ret += atomic_read(&conf->nr_pending[idx]) -
			atomic_read(&conf->nr_queued[idx]);

	return ret;
}

static void freeze_array(struct r1conf *conf, int extra)
{
	/* Stop sync I/O and normal I/O and wait for everything to
	 * go quiet.
	 * This is called in two situations:
	 * 1) management command handlers (reshape, remove disk, quiesce).
	 * 2) one normal I/O request failed.
	 *
	 * After array_frozen is set to 1, new sync IO will be blocked at
	 * raise_barrier(), and new normal I/O will be blocked at
	 * _wait_barrier() or wait_read_barrier(). The flying I/Os will either
	 * complete or be queued. When everything goes quiet, there are only
	 * queued I/Os left.
	 *
	 * Every flying I/O contributes to a conf->nr_pending[idx], idx is the
	 * barrier bucket index which this I/O request hits. When all sync and
	 * normal I/O are queued, sum of all conf->nr_pending[] will match sum
	 * of all conf->nr_queued[]. But normal I/O failure is an exception,
	 * in handle_read_error(), we may call freeze_array() before trying to
	 * fix the read error. In this case, the error read I/O is not queued,
	 * so get_unqueued_pending() == 1.
	 *
	 * Therefore before this function returns, we need to wait until
	 * get_unqueued_pending(conf) gets equal to extra. For
	 * normal I/O context, extra is 1, in all other situations extra is 0.
	 */
	spin_lock_irq(&conf->resync_lock);
	conf->array_frozen = 1;
	raid1_log(conf->mddev, "wait freeze");
	wait_event_lock_irq_cmd(
		conf->wait_barrier,
		get_unqueued_pending(conf) == extra,
		conf->resync_lock,
		flush_pending_writes(conf));
	spin_unlock_irq(&conf->resync_lock);
}
static void unfreeze_array(struct r1conf *conf)
{
	/* reverse the effect of the freeze */
	spin_lock_irq(&conf->resync_lock);
	conf->array_frozen = 0;
	spin_unlock_irq(&conf->resync_lock);
	wake_up(&conf->wait_barrier);
}
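
/*
 * Editorial note, not part of the original source: a sketch of how the
 * freeze/unfreeze pair is meant to be used, with 'extra' naming the
 * number of not-yet-queued I/Os the caller itself still holds:
 *
 *	freeze_array(conf, 0);		// e.g. quiesce/reconfigure paths
 *	... reconfigure the array ...
 *	unfreeze_array(conf);
 *
 *	freeze_array(conf, 1);		// e.g. handle_read_error(), which
 *					// holds the failed (unqueued) bio
 *	... fix up the read error ...
 *	unfreeze_array(conf);
 */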

static void alloc_behind_master_bio(struct r1bio *r1_bio,
					   struct bio *bio)
{
	int size = bio->bi_iter.bi_size;
	unsigned vcnt = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	int i = 0;
	struct bio *behind_bio = NULL;

	behind_bio = bio_alloc_mddev(GFP_NOIO, vcnt, r1_bio->mddev);
	if (!behind_bio)
		return;

	/* discard op, we don't support writezero/writesame yet */
	if (!bio_has_data(bio)) {
		behind_bio->bi_iter.bi_size = size;
		goto skip_copy;
	}

	behind_bio->bi_write_hint = bio->bi_write_hint;

	while (i < vcnt && size) {
		struct page *page;
		int len = min_t(int, PAGE_SIZE, size);

		page = alloc_page(GFP_NOIO);
		if (unlikely(!page))
			goto free_pages;

		bio_add_page(behind_bio, page, len, 0);

		size -= len;
		i++;
	}

	bio_copy_data(behind_bio, bio);
skip_copy:
	r1_bio->behind_master_bio = behind_bio;
	set_bit(R1BIO_BehindIO, &r1_bio->state);

	return;

free_pages:
	pr_debug("%dB behind alloc failed, doing sync I/O\n",
		 bio->bi_iter.bi_size);
	bio_free_pages(behind_bio);
	bio_put(behind_bio);
}
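
/*
 * Editorial note, not part of the original source: the "behind" copy
 * built above lets writes to WriteMostly devices complete lazily - the
 * master bio is acknowledged as soon as all non-write-mostly mirrors
 * have the data (see the behind_remaining accounting in
 * raid1_end_write_request()), while the slow mirror keeps writing from
 * this private copy of the pages.
 */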

struct raid1_plug_cb {
	struct blk_plug_cb	cb;
	struct bio_list		pending;
	int			pending_cnt;
};

static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
	struct raid1_plug_cb *plug = container_of(cb, struct raid1_plug_cb,
						  cb);
	struct mddev *mddev = plug->cb.data;
	struct r1conf *conf = mddev->private;
	struct bio *bio;

	if (from_schedule || current->bio_list) {
		spin_lock_irq(&conf->device_lock);
		bio_list_merge(&conf->pending_bio_list, &plug->pending);
		conf->pending_count += plug->pending_cnt;
		spin_unlock_irq(&conf->device_lock);
		wake_up(&conf->wait_barrier);
		md_wakeup_thread(mddev->thread);
		kfree(plug);
		return;
	}

	/* we aren't scheduling, so we can do the write-out directly. */
	bio = bio_list_get(&plug->pending);
	flush_bio_list(conf, bio);
	kfree(plug);
}

static void init_r1bio(struct r1bio *r1_bio, struct mddev *mddev, struct bio *bio)
{
	r1_bio->master_bio = bio;
	r1_bio->sectors = bio_sectors(bio);
	r1_bio->state = 0;
	r1_bio->mddev = mddev;
	r1_bio->sector = bio->bi_iter.bi_sector;
}

static inline struct r1bio *
alloc_r1bio(struct mddev *mddev, struct bio *bio)
{
	struct r1conf *conf = mddev->private;
	struct r1bio *r1_bio;

	r1_bio = mempool_alloc(&conf->r1bio_pool, GFP_NOIO);
	/* Ensure no bio records IO_BLOCKED */
	memset(r1_bio->bios, 0, conf->raid_disks * sizeof(r1_bio->bios[0]));
	init_r1bio(r1_bio, mddev, bio);
	return r1_bio;
}

static void raid1_read_request(struct mddev *mddev, struct bio *bio,
			       int max_read_sectors, struct r1bio *r1_bio)
{
	struct r1conf *conf = mddev->private;
	struct raid1_info *mirror;
	struct bio *read_bio;
	struct bitmap *bitmap = mddev->bitmap;
	const int op = bio_op(bio);
	const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
	int max_sectors;
	int rdisk;
	bool print_msg = !!r1_bio;
	char b[BDEVNAME_SIZE];

	/*
	 * If r1_bio is set, we are blocking the raid1d thread
	 * so there is a tiny risk of deadlock.  So ask for
	 * emergency memory if needed.
	 */
	gfp_t gfp = r1_bio ? (GFP_NOIO | __GFP_HIGH) : GFP_NOIO;

	if (print_msg) {
		/* Need to get the block device name carefully */
		struct md_rdev *rdev;
		rcu_read_lock();
		rdev = rcu_dereference(conf->mirrors[r1_bio->read_disk].rdev);
		if (rdev)
			bdevname(rdev->bdev, b);
		else
			strcpy(b, "???");
		rcu_read_unlock();
	}

	/*
	 * Still need barrier for READ in case the whole
	 * array is frozen.
	 */
	wait_read_barrier(conf, bio->bi_iter.bi_sector);

	if (!r1_bio)
		r1_bio = alloc_r1bio(mddev, bio);
	else
		init_r1bio(r1_bio, mddev, bio);
	r1_bio->sectors = max_read_sectors;

	/*
	 * make_request() can abort the operation when read-ahead is being
	 * used and no empty request is available.
	 */
	rdisk = read_balance(conf, r1_bio, &max_sectors);

	if (rdisk < 0) {
		/* couldn't find anywhere to read from */
		if (print_msg) {
			pr_crit_ratelimited("md/raid1:%s: %s: unrecoverable I/O read error for block %llu\n",
					    mdname(mddev),
					    b,
					    (unsigned long long)r1_bio->sector);
		}
		raid_end_bio_io(r1_bio);
		return;
	}
	mirror = conf->mirrors + rdisk;

	if (print_msg)
		pr_info_ratelimited("md/raid1:%s: redirecting sector %llu to other mirror: %s\n",
				    mdname(mddev),
				    (unsigned long long)r1_bio->sector,
				    bdevname(mirror->rdev->bdev, b));

	if (test_bit(WriteMostly, &mirror->rdev->flags) &&
	    bitmap) {
		/*
		 * Reading from a write-mostly device must take care not to
		 * over-take any writes that are 'behind'
		 */
		raid1_log(mddev, "wait behind writes");
		wait_event(bitmap->behind_wait,
			   atomic_read(&bitmap->behind_writes) == 0);
	}

	if (max_sectors < bio_sectors(bio)) {
		struct bio *split = bio_split(bio, max_sectors,
					      gfp, &conf->bio_split);
		bio_chain(split, bio);
		generic_make_request(bio);
		bio = split;
		r1_bio->master_bio = bio;
		r1_bio->sectors = max_sectors;
	}

	r1_bio->read_disk = rdisk;

	read_bio = bio_clone_fast(bio, gfp, &mddev->bio_set);

	r1_bio->bios[rdisk] = read_bio;

	read_bio->bi_iter.bi_sector = r1_bio->sector +
		mirror->rdev->data_offset;
	bio_set_dev(read_bio, mirror->rdev->bdev);
	read_bio->bi_end_io = raid1_end_read_request;
	bio_set_op_attrs(read_bio, op, do_sync);
	if (test_bit(FailFast, &mirror->rdev->flags) &&
	    test_bit(R1BIO_FailFast, &r1_bio->state))
	        read_bio->bi_opf |= MD_FAILFAST;
	read_bio->bi_private = r1_bio;

	if (mddev->gendisk)
	        trace_block_bio_remap(read_bio->bi_disk->queue, read_bio,
				disk_devt(mddev->gendisk), r1_bio->sector);

	generic_make_request(read_bio);
}

static void raid1_write_request(struct mddev *mddev, struct bio *bio,
				int max_write_sectors)
{
	struct r1conf *conf = mddev->private;
	struct r1bio *r1_bio;
	int i, disks;
	struct bitmap *bitmap = mddev->bitmap;
	unsigned long flags;
	struct md_rdev *blocked_rdev;
	struct blk_plug_cb *cb;
	struct raid1_plug_cb *plug = NULL;
	int first_clone;
	int max_sectors;

	if (mddev_is_clustered(mddev) &&
	     md_cluster_ops->area_resyncing(mddev, WRITE,