Commit 042a3082 authored by Hugh Dickins, committed by Linus Torvalds

mm/khugepaged: minor reorderings in collapse_shmem()

Several cleanups in collapse_shmem(), most of which probably do not
really matter beyond doing things in a more familiar and reassuring
order.  Simplify the failure gotos in the main loop, and on success
update the stats while interrupts are still disabled from the last iteration.

Link: http://lkml.kernel.org/r/alpine.LSU.2.11.1811261526400.2275@eggly.anvils
Fixes: f3f0e1d2 ("khugepaged: add support of collapse for tmpfs/shmem pages")
Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Jerome Glisse <jglisse@redhat.com>
Cc: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: <stable@vger.kernel.org>	[4.8+]
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 2af8ff29
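The error-path pattern the message refers to is worth spelling out before the diff. Below is a minimal, self-contained C sketch of it, not taken from the patch: the mutex, counter, and function names are invented stand-ins, with a plain pthread mutex playing the role of the irq-disabling xas_lock_irq(). Every failure inside the loop jumps to a single label while the lock is still held, so there is exactly one unlock site; on success, the statistics are updated before that unlock, i.e. while the lock taken in the last iteration is still held.

#include <pthread.h>
#include <stdio.h>

/* A plain mutex stands in for the irq-disabling xas_lock_irq() of the patch. */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static long nr_collapsed;	/* stands in for the NR_SHMEM_THPS etc. counters */

/* Hypothetical per-item step; fails on item 7 so the error path is exercised. */
static int process_item(int i)
{
	return (i == 7) ? -1 : 0;
}

static int collapse_range(int start, int end)
{
	int result = 0;
	int i;

	pthread_mutex_lock(&lock);
	for (i = start; i < end; i++) {
		if (process_item(i)) {
			result = -1;
			goto out_locked;	/* every failure funnels here, lock held */
		}
	}
	/*
	 * Success: update the stats while the lock from the last
	 * iteration is still held, as the commit message describes.
	 */
	nr_collapsed += end - start;
out_locked:
	pthread_mutex_unlock(&lock);	/* single unlock site, like xa_locked: */
	/* lock-free cleanup would follow here, like xa_unlocked: */
	return result;
}

int main(void)
{
	printf("ok range:   result=%d nr_collapsed=%ld\n",
	       collapse_range(0, 5), nr_collapsed);
	printf("fail range: result=%d nr_collapsed=%ld\n",
	       collapse_range(5, 10), nr_collapsed);
	return 0;
}

Compared with the old break-then-unlock arrangement, this keeps the locked and unlocked cleanup phases textually separate, which is what makes the xa_locked:/xa_unlocked: pair in the diff below easy to audit.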
@@ -1329,10 +1329,10 @@ static void collapse_shmem(struct mm_struct *mm,
 		goto out;
 	}
 
-	__SetPageLocked(new_page);
-	__SetPageSwapBacked(new_page);
 	new_page->index = start;
 	new_page->mapping = mapping;
+	__SetPageSwapBacked(new_page);
+	__SetPageLocked(new_page);
 	BUG_ON(!page_ref_freeze(new_page, 1));
 
 	/*
@@ -1366,13 +1366,13 @@ static void collapse_shmem(struct mm_struct *mm,
 			if (index == start) {
 				if (!xas_next_entry(&xas, end - 1)) {
 					result = SCAN_TRUNCATED;
-					break;
+					goto xa_locked;
 				}
 				xas_set(&xas, index);
 			}
 			if (!shmem_charge(mapping->host, 1)) {
 				result = SCAN_FAIL;
-				break;
+				goto xa_locked;
 			}
 			xas_store(&xas, new_page + (index % HPAGE_PMD_NR));
 			nr_none++;
@@ -1387,13 +1387,12 @@ static void collapse_shmem(struct mm_struct *mm,
 				result = SCAN_FAIL;
 				goto xa_unlocked;
 			}
-			xas_lock_irq(&xas);
-			xas_set(&xas, index);
 		} else if (trylock_page(page)) {
 			get_page(page);
+			xas_unlock_irq(&xas);
 		} else {
 			result = SCAN_PAGE_LOCK;
-			break;
+			goto xa_locked;
 		}
 
 		/*
@@ -1408,11 +1407,10 @@ static void collapse_shmem(struct mm_struct *mm,
 			result = SCAN_TRUNCATED;
 			goto out_unlock;
 		}
-		xas_unlock_irq(&xas);
 
 		if (isolate_lru_page(page)) {
 			result = SCAN_DEL_PAGE_LRU;
-			goto out_isolate_failed;
+			goto out_unlock;
 		}
 
 		if (page_mapped(page))
@@ -1432,7 +1430,9 @@ static void collapse_shmem(struct mm_struct *mm,
 		 */
 		if (!page_ref_freeze(page, 3)) {
 			result = SCAN_PAGE_COUNT;
-			goto out_lru;
+			xas_unlock_irq(&xas);
+			putback_lru_page(page);
+			goto out_unlock;
 		}
 
 		/*
@@ -1444,24 +1444,26 @@ static void collapse_shmem(struct mm_struct *mm,
 		/* Finally, replace with the new page. */
 		xas_store(&xas, new_page + (index % HPAGE_PMD_NR));
 		continue;
-out_lru:
-		xas_unlock_irq(&xas);
-		putback_lru_page(page);
-out_isolate_failed:
-		unlock_page(page);
-		put_page(page);
-		goto xa_unlocked;
 out_unlock:
 		unlock_page(page);
 		put_page(page);
-		break;
+		goto xa_unlocked;
 	}
-	xas_unlock_irq(&xas);
 
+	__inc_node_page_state(new_page, NR_SHMEM_THPS);
+	if (nr_none) {
+		struct zone *zone = page_zone(new_page);
+
+		__mod_node_page_state(zone->zone_pgdat, NR_FILE_PAGES, nr_none);
+		__mod_node_page_state(zone->zone_pgdat, NR_SHMEM, nr_none);
+	}
+
+xa_locked:
+	xas_unlock_irq(&xas);
 xa_unlocked:
+
 	if (result == SCAN_SUCCEED) {
 		struct page *page, *tmp;
-		struct zone *zone = page_zone(new_page);
 
 		/*
 		 * Replacing old pages with new one has succeeded, now we
@@ -1476,11 +1478,11 @@ static void collapse_shmem(struct mm_struct *mm,
 			copy_highpage(new_page + (page->index % HPAGE_PMD_NR),
 					page);
 			list_del(&page->lru);
-			unlock_page(page);
-			page_ref_unfreeze(page, 1);
 			page->mapping = NULL;
+			page_ref_unfreeze(page, 1);
 			ClearPageActive(page);
 			ClearPageUnevictable(page);
+			unlock_page(page);
 			put_page(page);
 			index++;
 		}
@@ -1489,28 +1491,17 @@ static void collapse_shmem(struct mm_struct *mm,
 			index++;
 		}
 
-		local_irq_disable();
-		__inc_node_page_state(new_page, NR_SHMEM_THPS);
-		if (nr_none) {
-			__mod_node_page_state(zone->zone_pgdat, NR_FILE_PAGES, nr_none);
-			__mod_node_page_state(zone->zone_pgdat, NR_SHMEM, nr_none);
-		}
-		local_irq_enable();
-
-		/*
-		 * Remove pte page tables, so we can re-fault
-		 * the page as huge.
-		 */
-		retract_page_tables(mapping, start);
-
 		/* Everything is ready, let's unfreeze the new_page */
-		set_page_dirty(new_page);
 		SetPageUptodate(new_page);
 		page_ref_unfreeze(new_page, HPAGE_PMD_NR);
+		set_page_dirty(new_page);
 		mem_cgroup_commit_charge(new_page, memcg, false, true);
 		lru_cache_add_anon(new_page);
-		unlock_page(new_page);
 
+		/*
+		 * Remove pte page tables, so we can re-fault the page as huge.
+		 */
+		retract_page_tables(mapping, start);
 		*hpage = NULL;
 
 		khugepaged_pages_collapsed++;
@@ -1543,8 +1534,8 @@ static void collapse_shmem(struct mm_struct *mm,
 			xas_store(&xas, page);
 			xas_pause(&xas);
 			xas_unlock_irq(&xas);
-			putback_lru_page(page);
 			unlock_page(page);
+			putback_lru_page(page);
 			xas_lock_irq(&xas);
 		}
 		VM_BUG_ON(nr_none);
@@ -1553,9 +1544,10 @@ static void collapse_shmem(struct mm_struct *mm,
 		/* Unfreeze new_page, caller would take care about freeing it */
 		page_ref_unfreeze(new_page, 1);
 		mem_cgroup_cancel_charge(new_page, memcg, true);
-		unlock_page(new_page);
 		new_page->mapping = NULL;
 	}
+
+	unlock_page(new_page);
 out:
 	VM_BUG_ON(!list_empty(&pagelist));
 	/* TODO: tracepoints */