Commit 906f9cdf authored by Hugh Dickins, committed by Linus Torvalds

mm/huge_memory: rename freeze_page() to unmap_page()

The term "freeze" is used in several ways in the kernel, and in mm it
has the particular meaning of forcing page refcount temporarily to 0.
freeze_page() is just too confusing a name for a function that unmaps a
page: rename it unmap_page(), and rename unfreeze_page() remap_page().

Went to change the mention of freeze_page() added later in mm/rmap.c,
but found it to be incorrect: ordinary page reclaim reaches there too;
but the substance of the comment still seems correct, so edit it down.

Link: http://lkml.kernel.org/r/alpine.LSU.2.11.1811261514080.2275@eggly.anvils
Fixes: e9b61f19 ("thp: reintroduce split_huge_page()")
Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Jerome Glisse <jglisse@redhat.com>
Cc: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: <stable@vger.kernel.org>	[4.8+]
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 7c0950d4
...@@ -2350,7 +2350,7 @@ void vma_adjust_trans_huge(struct vm_area_struct *vma, ...@@ -2350,7 +2350,7 @@ void vma_adjust_trans_huge(struct vm_area_struct *vma,
} }
} }
static void freeze_page(struct page *page) static void unmap_page(struct page *page)
{ {
enum ttu_flags ttu_flags = TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS | enum ttu_flags ttu_flags = TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS |
TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD; TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD;
...@@ -2365,7 +2365,7 @@ static void freeze_page(struct page *page) ...@@ -2365,7 +2365,7 @@ static void freeze_page(struct page *page)
VM_BUG_ON_PAGE(!unmap_success, page); VM_BUG_ON_PAGE(!unmap_success, page);
} }
static void unfreeze_page(struct page *page) static void remap_page(struct page *page)
{ {
int i; int i;
if (PageTransHuge(page)) { if (PageTransHuge(page)) {
...@@ -2483,7 +2483,7 @@ static void __split_huge_page(struct page *page, struct list_head *list, ...@@ -2483,7 +2483,7 @@ static void __split_huge_page(struct page *page, struct list_head *list,
spin_unlock_irqrestore(zone_lru_lock(page_zone(head)), flags); spin_unlock_irqrestore(zone_lru_lock(page_zone(head)), flags);
unfreeze_page(head); remap_page(head);
for (i = 0; i < HPAGE_PMD_NR; i++) { for (i = 0; i < HPAGE_PMD_NR; i++) {
struct page *subpage = head + i; struct page *subpage = head + i;
...@@ -2664,7 +2664,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list) ...@@ -2664,7 +2664,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
} }
/* /*
* Racy check if we can split the page, before freeze_page() will * Racy check if we can split the page, before unmap_page() will
* split PMDs * split PMDs
*/ */
if (!can_split_huge_page(head, &extra_pins)) { if (!can_split_huge_page(head, &extra_pins)) {
...@@ -2673,7 +2673,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list) ...@@ -2673,7 +2673,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
} }
mlocked = PageMlocked(page); mlocked = PageMlocked(page);
freeze_page(head); unmap_page(head);
VM_BUG_ON_PAGE(compound_mapcount(head), head); VM_BUG_ON_PAGE(compound_mapcount(head), head);
/* Make sure the page is not on per-CPU pagevec as it takes pin */ /* Make sure the page is not on per-CPU pagevec as it takes pin */
...@@ -2727,7 +2727,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list) ...@@ -2727,7 +2727,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
fail: if (mapping) fail: if (mapping)
xa_unlock(&mapping->i_pages); xa_unlock(&mapping->i_pages);
spin_unlock_irqrestore(zone_lru_lock(page_zone(head)), flags); spin_unlock_irqrestore(zone_lru_lock(page_zone(head)), flags);
unfreeze_page(head); remap_page(head);
ret = -EBUSY; ret = -EBUSY;
} }
......
...@@ -1627,16 +1627,9 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma, ...@@ -1627,16 +1627,9 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
address + PAGE_SIZE); address + PAGE_SIZE);
} else { } else {
/* /*
* We should not need to notify here as we reach this * This is a locked file-backed page, thus it cannot
* case only from freeze_page() itself only call from * be removed from the page cache and replaced by a new
* split_huge_page_to_list() so everything below must * page before mmu_notifier_invalidate_range_end, so no
* be true:
* - page is not anonymous
* - page is locked
*
* So as it is a locked file back page thus it can not
* be remove from the page cache and replace by a new
* page before mmu_notifier_invalidate_range_end so no
* concurrent thread might update its page table to * concurrent thread might update its page table to
* point at new page while a device still is using this * point at new page while a device still is using this
* page. * page.
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment