/*
 * mm/page-writeback.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * Contains functions related to writing back dirty pages at the
 * address_space level.
 *
 * 10Apr2002	Andrew Morton
 *		Initial version
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/init.h>
#include <linux/backing-dev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/blkdev.h>
#include <linux/mpage.h>
#include <linux/rmap.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
#include <linux/buffer_head.h>
#include <linux/pagevec.h>

/*
 * After a CPU has dirtied this many pages, balance_dirty_pages_ratelimited
 * will look to see if it needs to force writeback or throttling.
 */
static long ratelimit_pages = 32;

/*
 * When balance_dirty_pages decides that the caller needs to perform some
 * non-background writeback, this is how many pages it will attempt to write.
 * It should be somewhat larger than dirtied pages to ensure that reasonably
 * large amounts of I/O are submitted.
 */
static inline long sync_writeback_pages(unsigned long dirtied)
{
	if (dirtied < ratelimit_pages)
		dirtied = ratelimit_pages;

	return dirtied + dirtied / 2;
}
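
/*
 * Worked example (numbers are illustrative, assuming the initial
 * ratelimit_pages of 32): a caller that has dirtied 100 pages since the
 * last check is asked to write 100 + 100/2 = 150 pages, while a caller
 * that dirtied fewer than 32 pages is rounded up to 32 + 32/2 = 48.
 */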

/* The following parameters are exported via /proc/sys/vm */

/*
 * Start background writeback (via writeback threads) at this percentage
 */
int dirty_background_ratio = 10;

/*
 * dirty_background_bytes starts at 0 (disabled) so that it is a function of
 * dirty_background_ratio * the amount of dirtyable memory
 */
unsigned long dirty_background_bytes;

/*
 * free highmem will not be subtracted from the total free memory
 * for calculating free ratios if vm_highmem_is_dirtyable is true
 */
int vm_highmem_is_dirtyable;

/*
 * The generator of dirty data starts writeback at this percentage
 */
int vm_dirty_ratio = 20;

/*
 * vm_dirty_bytes starts at 0 (disabled) so that it is a function of
 * vm_dirty_ratio * the amount of dirtyable memory
 */
unsigned long vm_dirty_bytes;

/*
 * The interval between `kupdate'-style writebacks
 */
unsigned int dirty_writeback_interval = 5 * 100; /* centiseconds */

/*
 * The longest time for which data is allowed to remain dirty
 */
unsigned int dirty_expire_interval = 30 * 100; /* centiseconds */

/*
 * Flag that makes the machine dump writes/reads and block dirtyings.
 */
int block_dump;

/*
 * Flag that puts the machine in "laptop mode". Doubles as a timeout in jiffies:
 * a full sync is triggered after this time elapses without any disk activity.
 */
int laptop_mode;

EXPORT_SYMBOL(laptop_mode);

/* End of sysctl-exported parameters */


/*
 * Scale the writeback cache size proportional to the relative writeout speeds.
 *
 * We do this by keeping a floating proportion between BDIs, based on page
 * writeback completions [end_page_writeback()]. Those devices that write out
 * pages fastest will get the larger share, while the slower will get a smaller
 * share.
 *
 * We use page writeout completions because we are interested in getting rid of
 * dirty pages. Having them written out is the primary goal.
 *
 * We introduce a concept of time, a period over which we measure these events,
 * because demand can/will vary over time. The length of this period itself is
 * measured in page writeback completions.
 *
 */
static struct prop_descriptor vm_completions;
static struct prop_descriptor vm_dirties;

/*
 * couple the period to the dirty_ratio:
 *
 *   period/2 ~ roundup_pow_of_two(dirty limit)
 */
static int calc_period_shift(void)
{
	unsigned long dirty_total;

	if (vm_dirty_bytes)
		dirty_total = vm_dirty_bytes / PAGE_SIZE;
	else
		dirty_total = (vm_dirty_ratio * determine_dirtyable_memory()) /
				100;
	return 2 + ilog2(dirty_total - 1);
}
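
/*
 * Example with assumed numbers: vm_dirty_ratio == 20 and roughly 1GB of
 * dirtyable memory (262144 4K pages) give dirty_total = 52428 pages, so
 * the period shift comes out as 2 + ilog2(52427) = 2 + 15 = 17.
 */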

/*
 * update the period when the dirty threshold changes.
 */
static void update_completion_period(void)
{
	int shift = calc_period_shift();
	prop_change_shift(&vm_completions, shift);
	prop_change_shift(&vm_dirties, shift);
}

int dirty_background_ratio_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int ret;

	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		dirty_background_bytes = 0;
	return ret;
}

int dirty_background_bytes_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int ret;

	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		dirty_background_ratio = 0;
	return ret;
}

int dirty_ratio_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int old_ratio = vm_dirty_ratio;
	int ret;

	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write && vm_dirty_ratio != old_ratio) {
		update_completion_period();
		vm_dirty_bytes = 0;
	}
	return ret;
}


int dirty_bytes_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	unsigned long old_bytes = vm_dirty_bytes;
	int ret;

	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write && vm_dirty_bytes != old_bytes) {
		update_completion_period();
		vm_dirty_ratio = 0;
	}
	return ret;
}

/*
 * Increment the BDI's writeout completion count and the global writeout
 * completion count. Called from test_clear_page_writeback().
 */
static inline void __bdi_writeout_inc(struct backing_dev_info *bdi)
{
	__prop_inc_percpu_max(&vm_completions, &bdi->completions,
			      bdi->max_prop_frac);
}

void bdi_writeout_inc(struct backing_dev_info *bdi)
{
	unsigned long flags;

	local_irq_save(flags);
	__bdi_writeout_inc(bdi);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(bdi_writeout_inc);

void task_dirty_inc(struct task_struct *tsk)
{
	prop_inc_single(&vm_dirties, &tsk->dirties);
}

/*
 * Obtain an accurate fraction of the BDI's portion.
 */
static void bdi_writeout_fraction(struct backing_dev_info *bdi,
		long *numerator, long *denominator)
{
	if (bdi_cap_writeback_dirty(bdi)) {
		prop_fraction_percpu(&vm_completions, &bdi->completions,
				numerator, denominator);
	} else {
		*numerator = 0;
		*denominator = 1;
	}
}

/*
 * Clip the earned share of dirty pages to that which is actually available.
 * This avoids exceeding the total dirty_limit when the floating averages
 * fluctuate too quickly.
 */
static void clip_bdi_dirty_limit(struct backing_dev_info *bdi,
		unsigned long dirty, unsigned long *pbdi_dirty)
{
	unsigned long avail_dirty;

	avail_dirty = global_page_state(NR_FILE_DIRTY) +
		 global_page_state(NR_WRITEBACK) +
		 global_page_state(NR_UNSTABLE_NFS) +
		 global_page_state(NR_WRITEBACK_TEMP);

	if (avail_dirty < dirty)
		avail_dirty = dirty - avail_dirty;
	else
		avail_dirty = 0;

	avail_dirty += bdi_stat(bdi, BDI_RECLAIMABLE) +
		bdi_stat(bdi, BDI_WRITEBACK);

	*pbdi_dirty = min(*pbdi_dirty, avail_dirty);
}

static inline void task_dirties_fraction(struct task_struct *tsk,
		long *numerator, long *denominator)
{
	prop_fraction_single(&vm_dirties, &tsk->dirties,
				numerator, denominator);
}

/*
 * scale the dirty limit
 *
 * task specific dirty limit:
 *
 *   dirty -= (dirty/8) * p_{t}
 */
static void task_dirty_limit(struct task_struct *tsk, unsigned long *pdirty)
{
	long numerator, denominator;
	unsigned long dirty = *pdirty;
	u64 inv = dirty >> 3;

	task_dirties_fraction(tsk, &numerator, &denominator);
	inv *= numerator;
	do_div(inv, denominator);

	dirty -= inv;
	if (dirty < *pdirty/2)
		dirty = *pdirty/2;

	*pdirty = dirty;
}
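
/*
 * Example with assumed numbers: for a limit of 1000 pages and a task
 * whose recent share of dirtyings is 1/2, inv = (1000 >> 3) * 1/2 = 62,
 * so that task's limit becomes 1000 - 62 = 938 pages; the result is
 * never clipped below half of the original *pdirty.
 */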

/*
 * bdi_min_ratio is the sum of the min_ratio percentages handed out to
 * BDIs so far.  bdi_set_min_ratio() refuses requests that would push the
 * total to 100% or beyond, and get_dirty_limits() only distributes the
 * remaining (100 - bdi_min_ratio)% of the dirty limit by writeout share.
 */
static unsigned int bdi_min_ratio;

int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio)
{
	int ret = 0;

	spin_lock_bh(&bdi_lock);
	if (min_ratio > bdi->max_ratio) {
		ret = -EINVAL;
	} else {
		min_ratio -= bdi->min_ratio;
		if (bdi_min_ratio + min_ratio < 100) {
			bdi_min_ratio += min_ratio;
			bdi->min_ratio += min_ratio;
		} else {
			ret = -EINVAL;
		}
	}
	spin_unlock_bh(&bdi_lock);

	return ret;
}

int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned max_ratio)
{
	int ret = 0;

	if (max_ratio > 100)
		return -EINVAL;

	spin_lock_bh(&bdi_lock);
	if (bdi->min_ratio > max_ratio) {
		ret = -EINVAL;
	} else {
		bdi->max_ratio = max_ratio;
		bdi->max_prop_frac = (PROP_FRAC_BASE * max_ratio) / 100;
	}
	spin_unlock_bh(&bdi_lock);

	return ret;
}
EXPORT_SYMBOL(bdi_set_max_ratio);
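
/*
 * Usage sketch (not taken from this file): a driver that has registered
 * a backing_dev_info "bdi" for a slow device could cap it at 5% of the
 * global dirty limit, and reserve it at least 1%, with
 *
 *	err = bdi_set_max_ratio(&bdi, 5);
 *	err = bdi_set_min_ratio(&bdi, 1);
 *
 * Both calls return -EINVAL if the requested ratios are inconsistent.
 */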

/*
 * Work out the current dirty-memory clamping and background writeout
 * thresholds.
 *
 * The main aim here is to lower them aggressively if there is a lot of mapped
 * memory around.  To avoid stressing page reclaim with lots of unreclaimable
 * pages.  It is better to clamp down on writers than to start swapping, and
 * performing lots of scanning.
 *
 * We only allow 1/2 of the currently-unmapped memory to be dirtied.
 *
 * We don't permit the clamping level to fall below 5% - that is getting rather
 * excessive.
 *
 * We make sure that the background writeout level is below the adjusted
 * clamping level.
 */

static unsigned long highmem_dirtyable_memory(unsigned long total)
{
#ifdef CONFIG_HIGHMEM
	int node;
	unsigned long x = 0;

	for_each_node_state(node, N_HIGH_MEMORY) {
		struct zone *z =
			&NODE_DATA(node)->node_zones[ZONE_HIGHMEM];

		x += zone_page_state(z, NR_FREE_PAGES) +
		     zone_reclaimable_pages(z);
	}
	/*
	 * Make sure that the number of highmem pages is never larger
	 * than the number of the total dirtyable memory. This can only
	 * occur in very strange VM situations but we want to make sure
	 * that this does not occur.
	 */
	return min(x, total);
#else
	return 0;
#endif
}

/**
 * determine_dirtyable_memory - amount of memory that may be used
 *
 * Returns the number of pages that can currently be freed and used
 * by the kernel for direct mappings.
 */
unsigned long determine_dirtyable_memory(void)
{
	unsigned long x;

	x = global_page_state(NR_FREE_PAGES) + global_reclaimable_pages();

	if (!vm_highmem_is_dirtyable)
		x -= highmem_dirtyable_memory(x);

	return x + 1;	/* Ensure that we never return 0 */
}
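
/*
 * Example with assumed numbers: 100000 free pages plus 300000
 * reclaimable pages, of which 50000 sit in highmem, give
 * 400000 - 50000 + 1 = 350001 dirtyable pages when
 * vm_highmem_is_dirtyable is 0.
 */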

void
get_dirty_limits(unsigned long *pbackground, unsigned long *pdirty,
		 unsigned long *pbdi_dirty, struct backing_dev_info *bdi)
{
	unsigned long background;
	unsigned long dirty;
	unsigned long available_memory = determine_dirtyable_memory();
	struct task_struct *tsk;

	if (vm_dirty_bytes)
		dirty = DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE);
	else {
		int dirty_ratio;

		dirty_ratio = vm_dirty_ratio;
		if (dirty_ratio < 5)
			dirty_ratio = 5;
		dirty = (dirty_ratio * available_memory) / 100;
	}

	if (dirty_background_bytes)
		background = DIV_ROUND_UP(dirty_background_bytes, PAGE_SIZE);
	else
		background = (dirty_background_ratio * available_memory) / 100;

	if (background >= dirty)
		background = dirty / 2;
	tsk = current;
	if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk)) {
		background += background / 4;
		dirty += dirty / 4;
	}
	*pbackground = background;
	*pdirty = dirty;

	if (bdi) {
		u64 bdi_dirty;
		long numerator, denominator;

		/*
		 * Calculate this BDI's share of the dirty ratio.
		 */
		bdi_writeout_fraction(bdi, &numerator, &denominator);

		bdi_dirty = (dirty * (100 - bdi_min_ratio)) / 100;
		bdi_dirty *= numerator;
		do_div(bdi_dirty, denominator);
		bdi_dirty += (dirty * bdi->min_ratio) / 100;
		if (bdi_dirty > (dirty * bdi->max_ratio) / 100)
			bdi_dirty = dirty * bdi->max_ratio / 100;

		*pbdi_dirty = bdi_dirty;
		clip_bdi_dirty_limit(bdi, dirty, pbdi_dirty);
		task_dirty_limit(current, pbdi_dirty);
	}
}
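
/*
 * Worked example (all numbers assumed): with 1000000 dirtyable pages,
 * vm_dirty_ratio == 20 and dirty_background_ratio == 10, the global
 * limits are dirty = 200000 and background = 100000 pages.  A bdi with
 * min_ratio == 0, max_ratio == 100 and a measured writeout fraction of
 * 1/4 is then given bdi_dirty = 200000 * 1/4 = 50000 pages, before the
 * clip_bdi_dirty_limit() and task_dirty_limit() corrections above.
 */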

/*
 * balance_dirty_pages() must be called by processes which are generating dirty
 * data.  It looks at the number of dirty pages in the machine and will force
 * the caller to perform writeback if the system is over `vm_dirty_ratio'.
 * If we're over `background_thresh' then the writeback threads are woken to
 * perform some writeout.
 */
static void balance_dirty_pages(struct address_space *mapping,
				unsigned long write_chunk)
{
	long nr_reclaimable, bdi_nr_reclaimable;
	long nr_writeback, bdi_nr_writeback;
	unsigned long background_thresh;
	unsigned long dirty_thresh;
	unsigned long bdi_thresh;
	unsigned long pages_written = 0;
	unsigned long pause = 1;

	struct backing_dev_info *bdi = mapping->backing_dev_info;

	for (;;) {
		struct writeback_control wbc = {
			.bdi		= bdi,
			.sync_mode	= WB_SYNC_NONE,
			.older_than_this = NULL,
			.nr_to_write	= write_chunk,
			.range_cyclic	= 1,
		};

		get_dirty_limits(&background_thresh, &dirty_thresh,
				&bdi_thresh, bdi);

		nr_reclaimable = global_page_state(NR_FILE_DIRTY) +
					global_page_state(NR_UNSTABLE_NFS);
		nr_writeback = global_page_state(NR_WRITEBACK);

		bdi_nr_reclaimable = bdi_stat(bdi, BDI_RECLAIMABLE);
		bdi_nr_writeback = bdi_stat(bdi, BDI_WRITEBACK);

		if (bdi_nr_reclaimable + bdi_nr_writeback <= bdi_thresh)
			break;

		/*
		 * Throttle it only when the background writeback cannot
		 * catch-up. This avoids (excessively) small writeouts
		 * when the bdi limits are ramping up.
		 */
		if (nr_reclaimable + nr_writeback <
				(background_thresh + dirty_thresh) / 2)
			break;

		if (!bdi->dirty_exceeded)
			bdi->dirty_exceeded = 1;

		/* Note: nr_reclaimable denotes nr_dirty + nr_unstable.
		 * Unstable writes are a feature of certain networked
		 * filesystems (i.e. NFS) in which data may have been
		 * written to the server's write cache, but has not yet
		 * been flushed to permanent storage.
		 * Only move pages to writeback if this bdi is over its
		 * threshold otherwise wait until the disk writes catch
		 * up.
		 */
		if (bdi_nr_reclaimable > bdi_thresh) {
			writeback_inodes_wbc(&wbc);
			pages_written += write_chunk - wbc.nr_to_write;
			get_dirty_limits(&background_thresh, &dirty_thresh,
				       &bdi_thresh, bdi);
		}

		/*
		 * In order to avoid the stacked BDI deadlock we need
		 * to ensure we accurately count the 'dirty' pages when
		 * the threshold is low.
		 *
		 * Otherwise it would be possible to get thresh+n pages
		 * reported dirty, even though there are thresh-m pages
		 * actually dirty; with m+n sitting in the percpu
		 * deltas.
		 */
		if (bdi_thresh < 2*bdi_stat_error(bdi)) {
			bdi_nr_reclaimable = bdi_stat_sum(bdi, BDI_RECLAIMABLE);
			bdi_nr_writeback = bdi_stat_sum(bdi, BDI_WRITEBACK);
		} else if (bdi_nr_reclaimable) {
			bdi_nr_reclaimable = bdi_stat(bdi, BDI_RECLAIMABLE);
			bdi_nr_writeback = bdi_stat(bdi, BDI_WRITEBACK);
		}

		if (bdi_nr_reclaimable + bdi_nr_writeback <= bdi_thresh)
			break;
		if (pages_written >= write_chunk)
			break;		/* We've done our duty */

		__set_current_state(TASK_INTERRUPTIBLE);
		io_schedule_timeout(pause);

		/*
		 * Increase the delay for each loop, up to our previous
		 * default of taking a 100ms nap.
		 */
		pause <<= 1;
		if (pause > HZ / 10)
			pause = HZ / 10;
	}

	if (bdi_nr_reclaimable + bdi_nr_writeback < bdi_thresh &&
			bdi->dirty_exceeded)
		bdi->dirty_exceeded = 0;

	if (writeback_in_progress(bdi))
		return;

	/*
	 * In laptop mode, we wait until hitting the higher threshold before
	 * starting background writeout, and then write out all the way down
	 * to the lower threshold.  So slow writers cause minimal disk activity.
	 *
	 * In normal mode, we start background writeout at the lower
	 * background_thresh, to keep the amount of dirty memory low.
	 */
	if ((laptop_mode && pages_written) ||
	    (!laptop_mode && ((global_page_state(NR_FILE_DIRTY)
			       + global_page_state(NR_UNSTABLE_NFS))
					  > background_thresh)))
		bdi_start_writeback(bdi, NULL, 0);
}

void set_page_dirty_balance(struct page *page, int page_mkwrite)
{
	if (set_page_dirty(page) || page_mkwrite) {
		struct address_space *mapping = page_mapping(page);

		if (mapping)
			balance_dirty_pages_ratelimited(mapping);
	}
}

static DEFINE_PER_CPU(unsigned long, bdp_ratelimits) = 0;

/**
 * balance_dirty_pages_ratelimited_nr - balance dirty memory state
 * @mapping: address_space which was dirtied
 * @nr_pages_dirtied: number of pages which the caller has just dirtied
 *
 * Processes which are dirtying memory should call in here once for each page
 * which was newly dirtied.  The function will periodically check the system's
 * dirty state and will initiate writeback if needed.
 *
 * On really big machines, get_writeback_state is expensive, so try to avoid
 * calling it too often (ratelimiting).  But once we're over the dirty memory
 * limit we decrease the ratelimiting by a lot, to prevent individual processes
 * from overshooting the limit by (ratelimit_pages) each.
 */
void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
					unsigned long nr_pages_dirtied)
{
	unsigned long ratelimit;
	unsigned long *p;

	ratelimit = ratelimit_pages;
	if (mapping->backing_dev_info->dirty_exceeded)
		ratelimit = 8;

	/*
	 * Check the rate limiting. Also, we do not want to throttle real-time
	 * tasks in balance_dirty_pages(). Period.
	 */
	preempt_disable();
	p =  &__get_cpu_var(bdp_ratelimits);
	*p += nr_pages_dirtied;
	if (unlikely(*p >= ratelimit)) {
		ratelimit = sync_writeback_pages(*p);
		*p = 0;
		preempt_enable();
		balance_dirty_pages(mapping, ratelimit);
		return;
	}
	preempt_enable();
}
EXPORT_SYMBOL(balance_dirty_pages_ratelimited_nr);
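
/*
 * Typical caller pattern (sketch, not taken from this file): after
 * dirtying a page, filesystems normally go through the one-page wrapper
 * declared in <linux/writeback.h>, e.g.
 *
 *	set_page_dirty(page);
 *	balance_dirty_pages_ratelimited(mapping);
 *
 * which just calls balance_dirty_pages_ratelimited_nr(mapping, 1).
 */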

void throttle_vm_writeout(gfp_t gfp_mask)
{
	unsigned long background_thresh;
	unsigned long dirty_thresh;

        for ( ; ; ) {
		get_dirty_limits(&background_thresh, &dirty_thresh, NULL, NULL);

                /*
                 * Boost the allowable dirty threshold a bit for page
                 * allocators so they don't get DoS'ed by heavy writers
                 */
                dirty_thresh += dirty_thresh / 10;      /* wheeee... */

                if (global_page_state(NR_UNSTABLE_NFS) +
			global_page_state(NR_WRITEBACK) <= dirty_thresh)
                        	break;
                congestion_wait(BLK_RW_ASYNC, HZ/10);

		/*
		 * The caller might hold locks which can prevent IO completion
		 * or progress in the filesystem.  So we cannot just sit here
		 * waiting for IO to complete.
		 */
		if ((gfp_mask & (__GFP_FS|__GFP_IO)) != (__GFP_FS|__GFP_IO))
			break;
        }
}

static void laptop_timer_fn(unsigned long unused);

static DEFINE_TIMER(laptop_mode_wb_timer, laptop_timer_fn, 0, 0);

/*
 * sysctl handler for /proc/sys/vm/dirty_writeback_centisecs
 */
int dirty_writeback_centisecs_handler(ctl_table *table, int write,
	void __user *buffer, size_t *length, loff_t *ppos)
{
	proc_dointvec(table, write, buffer, length, ppos);
	return 0;
}

static void do_laptop_sync(struct work_struct *work)
{
	wakeup_flusher_threads(0);
	kfree(work);
}

static void laptop_timer_fn(unsigned long unused)
{
	struct work_struct *work;

	work = kmalloc(sizeof(*work), GFP_ATOMIC);
	if (work) {
		INIT_WORK(work, do_laptop_sync);
		schedule_work(work);
	}
}

/*
 * We've spun up the disk and we're in laptop mode: schedule writeback
 * of all dirty data a few seconds from now.  If the flush is already scheduled
 * then push it back - the user is still using the disk.
 */
void laptop_io_completion(void)
{
	mod_timer(&laptop_mode_wb_timer, jiffies + laptop_mode);
}

/*
 * We're in laptop mode and we've just synced. The sync's writes will have
 * caused another writeback to be scheduled by laptop_io_completion.
 * Nothing needs to be written back anymore, so we unschedule the writeback.
 */
void laptop_sync_completion(void)
{
	del_timer(&laptop_mode_wb_timer);
}

/*
 * If ratelimit_pages is too high then we can get into dirty-data overload
 * if a large number of processes all perform writes at the same time.
 * If it is too low then SMP machines will call the (expensive)
 * get_writeback_state too often.
 *
 * Here we set ratelimit_pages to a level which ensures that when all CPUs are
 * dirtying in parallel, we cannot go more than 3% (1/32) over the dirty memory
 * thresholds before writeback cuts in.
 *
 * But the limit should not be set too high.  Because it also controls the
 * amount of memory which the balance_dirty_pages() caller has to write back.
 * If this is too large then the caller will block on the IO queue all the
 * time.  So limit it to four megabytes - the balance_dirty_pages() caller
 * will write six megabyte chunks, max.
 */

void writeback_set_ratelimit(void)
{
	ratelimit_pages = vm_total_pages / (num_online_cpus() * 32);
	if (ratelimit_pages < 16)
		ratelimit_pages = 16;
	if (ratelimit_pages * PAGE_CACHE_SIZE > 4096 * 1024)
		ratelimit_pages = (4096 * 1024) / PAGE_CACHE_SIZE;
}
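
/*
 * Example with assumed numbers: 4 online CPUs and 2097152 total pages
 * (8GB with 4K pages) give 2097152 / (4 * 32) = 16384 pages, which the
 * 4MB clamp then reduces to 4096 * 1024 / PAGE_CACHE_SIZE = 1024 pages.
 */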

static int __cpuinit
ratelimit_handler(struct notifier_block *self, unsigned long u, void *v)
{
	writeback_set_ratelimit();
	return NOTIFY_DONE;
}

static struct notifier_block __cpuinitdata ratelimit_nb = {
	.notifier_call	= ratelimit_handler,
	.next		= NULL,
};

/*
 * Called early on to tune the page writeback dirty limits.
 *
 * We used to scale dirty pages according to how total memory
 * related to pages that could be allocated for buffers (by
 * comparing nr_free_buffer_pages() to vm_total_pages).
 *
 * However, that was when we used "dirty_ratio" to scale with
 * all memory, and we don't do that any more. "dirty_ratio"
 * is now applied to total non-HIGHPAGE memory (by subtracting
 * totalhigh_pages from vm_total_pages), and as such we can't
 * get into the old insane situation any more where we had
 * large amounts of dirty pages compared to a small amount of
 * non-HIGHMEM memory.
 *
 * But we might still want to scale the dirty_ratio by how
 * much memory the box has..
 */
void __init page_writeback_init(void)
{
	int shift;

	writeback_set_ratelimit();
	register_cpu_notifier(&ratelimit_nb);

	shift = calc_period_shift();
	prop_descriptor_init(&vm_completions, shift);
	prop_descriptor_init(&vm_dirties, shift);
}

/**
 * write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
 * @mapping: address space structure to write
 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
 * @writepage: function called for each page
 * @data: data passed to writepage function
 *
 * If a page is already under I/O, write_cache_pages() skips it, even
 * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
 * but it is INCORRECT for data-integrity system calls such as fsync().  fsync()
 * and msync() need to guarantee that all the data which was dirty at the time
 * the call was made get new I/O started against them.  If wbc->sync_mode is
 * WB_SYNC_ALL then we were called for data integrity and we must wait for
 * existing IO to complete.
 */
int write_cache_pages(struct address_space *mapping,
		      struct writeback_control *wbc, writepage_t writepage,
		      void *data)
{
	struct backing_dev_info *bdi = mapping->backing_dev_info;
	int ret = 0;
	int done = 0;
	struct pagevec pvec;
	int nr_pages;
	pgoff_t uninitialized_var(writeback_index);
	pgoff_t index;
	pgoff_t end;		/* Inclusive */
	pgoff_t done_index;
	int cycled;
	int range_whole = 0;
	long nr_to_write = wbc->nr_to_write;

	if (wbc->nonblocking && bdi_write_congested(bdi)) {
		wbc->encountered_congestion = 1;
		return 0;
	}

	pagevec_init(&pvec, 0);
	if (wbc->range_cyclic) {
		writeback_index = mapping->writeback_index; /* prev offset */
		index = writeback_index;
		if (index == 0)
			cycled = 1;
		else
			cycled = 0;
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_CACHE_SHIFT;
		end = wbc->range_end >> PAGE_CACHE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		cycled = 1; /* ignore range_cyclic tests */
	}
retry:
	done_index = index;
	while (!done && (index <= end)) {
		int i;

		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
			      PAGECACHE_TAG_DIRTY,
			      min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/*
			 * At this point, the page may be truncated or
			 * invalidated (changing page->mapping to NULL), or
			 * even swizzled back from swapper_space to tmpfs file
			 * mapping. However, page->index will not change
			 * because we have a reference on the page.
			 */
			if (page->index > end) {
				/*
				 * can't be range_cyclic (1st pass) because
				 * end == -1 in that case.
				 */
				done = 1;
				break;
			}

			done_index = page->index + 1;

			lock_page(page);

			/*
			 * Page truncated or invalidated. We can freely skip it
			 * then, even for data integrity operations: the page
			 * has disappeared concurrently, so there could be no
			 * real expectation of this data integrity operation
			 * even if there is now a new, dirty page at the same
			 * pagecache address.
			 */
			if (unlikely(page->mapping != mapping)) {
continue_unlock:
				unlock_page(page);
				continue;
			}

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (PageWriteback(page)) {
				if (wbc->sync_mode != WB_SYNC_NONE)
					wait_on_page_writeback(page);
				else
					goto continue_unlock;
			}

			BUG_ON(PageWriteback(page));
			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			ret = (*writepage)(page, wbc, data);
			if (unlikely(ret)) {
				if (ret == AOP_WRITEPAGE_ACTIVATE) {
					unlock_page(page);
					ret = 0;
				} else {
					/*
					 * done_index is set past this page,
					 * so media errors will not choke
					 * background writeout for the entire
					 * file. This has consequences for
					 * range_cyclic semantics (ie. it may
					 * not be suitable for data integrity
					 * writeout).
					 */
					done = 1;
					break;
				}
 			}

			if (nr_to_write > 0) {
				nr_to_write--;
				if (nr_to_write == 0 &&
				    wbc->sync_mode == WB_SYNC_NONE) {
					/*
					 * We stop writing back only if we are
					 * not doing integrity sync. In case of
					 * integrity sync we have to keep going
					 * because someone may be concurrently
					 * dirtying pages, and we might have
					 * synced a lot of newly appeared dirty
					 * pages, but have not synced all of the
					 * old dirty pages.
					 */
					done = 1;
					break;
				}
			}

			if (wbc->nonblocking && bdi_write_congested(bdi)) {
				wbc->encountered_congestion = 1;
				done = 1;
				break;
			}
		}
		pagevec_release(&pvec);
		cond_resched();
	}
	if (!cycled && !done) {
		/*
		 * range_cyclic:
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		cycled = 1;
		index = 0;
		end = writeback_index - 1;
		goto retry;
	}
	if (!wbc->no_nrwrite_index_update) {
		if (wbc->range_cyclic || (range_whole && nr_to_write > 0))
			mapping->writeback_index = done_index;
		wbc->nr_to_write = nr_to_write;
	}

	return ret;
}
EXPORT_SYMBOL(write_cache_pages);

/*
 * Function used by generic_writepages to call the real writepage
 * function and set the mapping flags on error
 */
static int __writepage(struct page *page, struct writeback_control *wbc,
		       void *data)
{
	struct address_space *mapping = data;
	int ret = mapping->a_ops->writepage(page, wbc);
	mapping_set_error(mapping, ret);
	return ret;
}

/**
 * generic_writepages - walk the list of dirty pages of the given address space and writepage() all of them.
 * @mapping: address space structure to write
 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
 *
 * This is a library function, which implements the writepages()
 * address_space_operation.
 */
int generic_writepages(struct address_space *mapping,
		       struct writeback_control *wbc)
{
	/* deal with chardevs and other special file */
	if (!mapping->a_ops->writepage)
		return 0;

	return write_cache_pages(mapping, wbc, __writepage, mapping);
}

EXPORT_SYMBOL(generic_writepages);
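
/*
 * Sketch of a filesystem ->writepages() built on write_cache_pages()
 * (the examplefs names are made up for illustration):
 *
 *	static int examplefs_writepages(struct address_space *mapping,
 *					struct writeback_control *wbc)
 *	{
 *		return write_cache_pages(mapping, wbc,
 *					 examplefs_writepage, mapping);
 *	}
 *
 * where examplefs_writepage() has the writepage_t signature
 * (struct page *, struct writeback_control *, void *data).
 */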

int do_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	int ret;

	if (wbc->nr_to_write <= 0)
		return 0;
	if (mapping->a_ops->writepages)
		ret = mapping->a_ops->writepages(mapping, wbc);
	else
		ret = generic_writepages(mapping, wbc);
	return ret;
}

/**
 * write_one_page - write out a single page and optionally wait on I/O
 * @page: the page to write
 * @wait: if true, wait on writeout
 *
 * The page must be locked by the caller and will be unlocked upon return.
 *
 * write_one_page() returns a negative error code if I/O failed.
 */
int write_one_page(struct page *page, int wait)
{
	struct address_space *mapping = page->mapping;
	int ret = 0;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = 1,
	};

	BUG_ON(!PageLocked(page));

	if (wait)
		wait_on_page_writeback(page);

	if (clear_page_dirty_for_io(page)) {
		page_cache_get(page);
		ret = mapping->a_ops->writepage(page, &wbc);
		if (ret == 0 && wait) {
			wait_on_page_writeback(page);
			if (PageError(page))
				ret = -EIO;
		}
		page_cache_release(page);
	} else {
		unlock_page(page);
	}
	return ret;
}
EXPORT_SYMBOL(write_one_page);
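
/*
 * Usage sketch (illustrative): a caller holding a locked page that must
 * be flushed synchronously can do
 *
 *	lock_page(page);
 *	err = write_one_page(page, 1);
 *
 * and the page is unlocked again by the time the call returns, whether
 * or not the write succeeded.
 */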

/*
 * For address_spaces which do not use buffers nor write back.
 */
int __set_page_dirty_no_writeback(struct page *page)
{
	if (!PageDirty(page))
		SetPageDirty(page);
	return 0;
}

/*
 * Helper function for set_page_dirty family.
 * NOTE: This relies on being atomic wrt interrupts.
 */
void account_page_dirtied(struct page *page, struct address_space *mapping)
{
	if (mapping_cap_account_dirty(mapping)) {
		__inc_zone_page_state(page, NR_FILE_DIRTY);
		__inc_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE);
		task_dirty_inc(current);
		task_io_account_write(PAGE_CACHE_SIZE);
	}
}

/*
 * For address_spaces which do not use buffers.  Just tag the page as dirty in
 * its radix tree.
 *
 * This is also used when a single buffer is being dirtied: we want to set the
 * page dirty in that case, but not all the buffers.  This is a "bottom-up"
 * dirtying, whereas __set_page_dirty_buffers() is a "top-down" dirtying.
 *
 * Most callers have locked the page, which pins the address_space in memory.
 * But zap_pte_range() does not lock the page, however in that case the
 * mapping is pinned by the vma's ->vm_file reference.
 *
 * We take care to handle the case where the page was truncated from the
 * mapping by re-checking page_mapping() inside tree_lock.
 */
int __set_page_dirty_nobuffers(struct page *page)
{
	if (!TestSetPageDirty(page)) {
		struct address_space *mapping = page_mapping(page);
		struct address_space *mapping2;

		if (!mapping)
			return 1;

		spin_lock_irq(&mapping->tree_lock);
		mapping2 = page_mapping(page);
		if (mapping2) { /* Race with truncate? */
			BUG_ON(mapping2 != mapping);
			WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page));
			account_page_dirtied(page, mapping);
			radix_tree_tag_set(&mapping->page_tree,
				page_index(page), PAGECACHE_TAG_DIRTY);
		}
		spin_unlock_irq(&mapping->tree_lock);
		if (mapping->host) {
			/* !PageAnon && !swapper_space */
			__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
		}
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL(__set_page_dirty_nobuffers);

/*
 * When a writepage implementation decides that it doesn't want to write this
 * page for some reason, it should redirty the locked page via
 * redirty_page_for_writepage() and it should then unlock the page and return 0
 */
int redirty_page_for_writepage(struct writeback_control *wbc, struct page *page)
{
	wbc->pages_skipped++;
	return __set_page_dirty_nobuffers(page);
}
EXPORT_SYMBOL(redirty_page_for_writepage);

/*
 * Dirty a page.
 *
 * For pages with a mapping this should be done under the page lock
 * for the benefit of asynchronous memory errors who prefer a consistent
 * dirty state. This rule can be broken in some special cases,
 * but should be better not to.
 *
 * If the mapping doesn't provide a set_page_dirty a_op, then
 * just fall through and assume that it wants buffer_heads.
 */
int set_page_dirty(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	if (likely(mapping)) {
		int (*spd)(struct page *) = mapping->a_ops->set_page_dirty;
#ifdef CONFIG_BLOCK
		if (!spd)
			spd = __set_page_dirty_buffers;
#endif
		return (*spd)(page);
	}
	if (!PageDirty(page)) {
		if (!TestSetPageDirty(page))
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL(set_page_dirty);

/*
 * set_page_dirty() is racy if the caller has no reference against
 * page->mapping->host, and if the page is unlocked.  This is because another
 * CPU could truncate the page off the mapping and then free the mapping.
 *
 * Usually, the page _is_ locked, or the caller is a user-space process which
 * holds a reference on the inode by having an open file.
 *
 * In other cases, the page should be locked before running set_page_dirty().
 */
int set_page_dirty_lock(struct page *page)
{
	int ret;

	lock_page_nosync(page);
	ret = set_page_dirty(page);
	unlock_page(page);
	return ret;
}
EXPORT_SYMBOL(set_page_dirty_lock);

/*
 * Clear a page's dirty flag, while caring for dirty memory accounting.
 * Returns true if the page was previously dirty.
 *
 * This is for preparing to put the page under writeout.  We leave the page
 * tagged as dirty in the radix tree so that a concurrent write-for-sync
 * can discover it via a PAGECACHE_TAG_DIRTY walk.  The ->writepage
 * implementation will run either set_page_writeback() or set_page_dirty(),
 * at which stage we bring the page's dirty flag and radix-tree dirty tag
 * back into sync.
 *
 * This incoherency between the page's dirty flag and radix-tree tag is
 * unfortunate, but it only exists while the page is locked.
 */
int clear_page_dirty_for_io(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	BUG_ON(!PageLocked(page));

	ClearPageReclaim(page);
	if (mapping && mapping_cap_account_dirty(mapping)) {
		/*
		 * Yes, Virginia, this is indeed insane.
		 *
		 * We use this sequence to make sure that
		 *  (a) we account for dirty stats properly
		 *  (b) we tell the low-level filesystem to
		 *      mark the whole page dirty if it was
		 *      dirty in a pagetable. Only to then
		 *  (c) clean the page again and return 1 to
		 *      cause the writeback.
		 *
		 * This way we avoid all nasty races with the
		 * dirty bit in multiple places and clearing
		 * them concurrently from different threads.
		 *
		 * Note! Normally the "set_page_dirty(page)"
		 * has no effect on the actual dirty bit - since
		 * that will already usually be set. But we
		 * need the side effects, and it can help us
		 * avoid races.
		 *
		 * We basically use the page "master dirty bit"
		 * as a serialization point for all the different
		 * threads doing their things.
		 */
		if (page_mkclean(page))
			set_page_dirty(page);
		/*
		 * We carefully synchronise fault handlers against
		 * installing a dirty pte and marking the page dirty
		 * at this point. We do this by having them hold the
		 * page lock at some point after installing their
		 * pte, but before marking the page dirty.
		 * Pages are always locked coming in here, so we get
		 * the desired exclusion. See mm/memory.c:do_wp_page()
		 * for more comments.
		 */
		if (TestClearPageDirty(page)) {
			dec_zone_page_state(page, NR_FILE_DIRTY);
			dec_bdi_stat(mapping->backing_dev_info,
					BDI_RECLAIMABLE);
			return 1;
		}
		return 0;
	}
	return TestClearPageDirty(page);
}
EXPORT_SYMBOL(clear_page_dirty_for_io);

int test_clear_page_writeback(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	int ret;

	if (mapping) {
		struct backing_dev_info *bdi = mapping->backing_dev_info;
		unsigned long flags;

		spin_lock_irqsave(&mapping->tree_lock, flags);
		ret = TestClearPageWriteback(page);
		if (ret) {
			radix_tree_tag_clear(&mapping->page_tree,
						page_index(page),
						PAGECACHE_TAG_WRITEBACK);
			if (bdi_cap_account_writeback(bdi)) {
				__dec_bdi_stat(bdi, BDI_WRITEBACK);
				__bdi_writeout_inc(bdi);
			}
		}
		spin_unlock_irqrestore(&mapping->tree_lock, flags);
	} else {
		ret = TestClearPageWriteback(page);
	}
	if (ret)
		dec_zone_page_state(page, NR_WRITEBACK);
	return ret;
}

int test_set_page_writeback(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	int ret;

	if (mapping) {
		struct backing_dev_info *bdi = mapping->backing_dev_info;
		unsigned long flags;

		spin_lock_irqsave(&mapping->tree_lock, flags);
		ret = TestSetPageWriteback(page);
		if (!ret) {
			radix_tree_tag_set(&mapping->page_tree,
						page_index(page),
						PAGECACHE_TAG_WRITEBACK);
			if (bdi_cap_account_writeback(bdi))
				__inc_bdi_stat(bdi, BDI_WRITEBACK);
		}
		if (!PageDirty(page))
			radix_tree_tag_clear(&mapping->page_tree,
						page_index(page),
						PAGECACHE_TAG_DIRTY);
		spin_unlock_irqrestore(&mapping->tree_lock, flags);
	} else {
		ret = TestSetPageWriteback(page);
	}
	if (!ret)
		inc_zone_page_state(page, NR_WRITEBACK);
	return ret;

}
EXPORT_SYMBOL(test_set_page_writeback);

/*
 * Return true if any of the pages in the mapping are marked with the
 * passed tag.
 */
int mapping_tagged(struct address_space *mapping, int tag)
{
	int ret;
	rcu_read_lock();
	ret = radix_tree_tagged(&mapping->page_tree, tag);
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(mapping_tagged);