Compare revisions
@@ -1921,9 +1921,18 @@ void drain_all_stock(struct mem_cgroup *root_memcg)
 static int memcg_hotplug_cpu_dead(unsigned int cpu)
 {
 	struct memcg_stock_pcp *stock;
+	struct obj_cgroup *old;
+	unsigned long flags;
 
 	stock = &per_cpu(memcg_stock, cpu);
+
+	/* drain_obj_stock requires stock_lock */
+	local_lock_irqsave(&memcg_stock.stock_lock, flags);
+	old = drain_obj_stock(stock);
+	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
+
 	drain_stock(stock);
+	obj_cgroup_put(old);
 
 	return 0;
 }
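For illustration only: a minimal userspace sketch of the pattern the hunk above applies, namely detaching the cached reference while holding the per-CPU stock lock and dropping the reference only after the lock is released. The struct pcpu_stock, obj_ref_put() and the pthread mutex below are stand-ins, not the kernel's memcg_stock_pcp, obj_cgroup_put() or local_lock API.

/* Illustrative stand-in types -- not kernel code. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct obj_ref { int id; };

struct pcpu_stock {
	pthread_mutex_t lock;
	struct obj_ref *cached;		/* reference held by the stock */
};

static void obj_ref_put(struct obj_ref *ref)
{
	free(ref);			/* stand-in for dropping the last reference */
}

/* Detach the cached reference under the lock (as drain_obj_stock() does)... */
static struct obj_ref *detach_cached(struct pcpu_stock *stock)
{
	struct obj_ref *old;

	pthread_mutex_lock(&stock->lock);
	old = stock->cached;
	stock->cached = NULL;
	pthread_mutex_unlock(&stock->lock);
	return old;
}

int main(void)
{
	struct pcpu_stock stock = { PTHREAD_MUTEX_INITIALIZER, NULL };
	struct obj_ref *old;

	stock.cached = malloc(sizeof(*stock.cached));
	stock.cached->id = 42;

	old = detach_cached(&stock);
	/* ...and release it only after the lock has been dropped. */
	if (old) {
		printf("put cached ref %d outside the lock\n", old->id);
		obj_ref_put(old);
	}
	return 0;
}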
@@ -4993,7 +5002,7 @@ void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry)
 		mem_cgroup_id_get_many(swap_memcg, nr_entries - 1);
 	mod_memcg_state(swap_memcg, MEMCG_SWAP, nr_entries);
 
-	swap_cgroup_record(folio, entry);
+	swap_cgroup_record(folio, mem_cgroup_id(swap_memcg), entry);
 
 	folio_unqueue_deferred_split(folio);
 	folio->memcg_data = 0;
@@ -5055,7 +5064,7 @@ int __mem_cgroup_try_charge_swap(struct folio *folio, swp_entry_t entry)
 		mem_cgroup_id_get_many(memcg, nr_pages - 1);
 	mod_memcg_state(memcg, MEMCG_SWAP, nr_pages);
 
-	swap_cgroup_record(folio, entry);
+	swap_cgroup_record(folio, mem_cgroup_id(memcg), entry);
 
 	return 0;
 }
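The two hunks above only change the call sites. As a rough, self-contained illustration of the interface change, the sketch below contrasts the old shape (the recorder derives the owner id from the folio itself) with the new one (the caller passes the id it already resolved). record_owner_old(), record_owner_new(), lookup_id() and folio_stub are hypothetical names, not kernel functions.

#include <stdio.h>

struct folio_stub { unsigned short owner_id; };

static unsigned short lookup_id(const struct folio_stub *f)
{
	return f->owner_id;	/* stand-in for mem_cgroup_id(folio_memcg(folio)) */
}

/* Old shape: the callee reaches back into the folio for the id. */
static void record_owner_old(const struct folio_stub *f, unsigned long entry)
{
	printf("entry %lu -> id %d (derived in callee)\n", entry, lookup_id(f));
}

/* New shape: the caller supplies the id explicitly, so recording no longer
 * depends on the folio's cgroup linkage still being intact at this point. */
static void record_owner_new(unsigned short id, unsigned long entry)
{
	printf("entry %lu -> id %d (passed by caller)\n", entry, id);
}

int main(void)
{
	struct folio_stub f = { .owner_id = 7 };

	record_owner_old(&f, 100);
	record_owner_new(lookup_id(&f), 100);
	return 0;
}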
......
@@ -518,15 +518,13 @@ static int __folio_migrate_mapping(struct address_space *mapping,
 	if (folio_test_anon(folio) && folio_test_large(folio))
 		mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON, 1);
 	folio_ref_add(newfolio, nr); /* add cache reference */
-	if (folio_test_swapbacked(folio)) {
+	if (folio_test_swapbacked(folio))
 		__folio_set_swapbacked(newfolio);
-		if (folio_test_swapcache(folio)) {
-			folio_set_swapcache(newfolio);
-			newfolio->private = folio_get_private(folio);
-		}
+	if (folio_test_swapcache(folio)) {
+		folio_set_swapcache(newfolio);
+		newfolio->private = folio_get_private(folio);
 		entries = nr;
 	} else {
 		VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio);
 		entries = 1;
 	}
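As a rough illustration of the rule this hunk changes: the per-page update count is now keyed on whether the folio is in the swap cache rather than on whether it is swap-backed, which matters for shmem folios that are swap-backed but not in the swap cache. slots_to_update() below is a hypothetical helper, not kernel code.

#include <stdbool.h>
#include <stdio.h>

static int slots_to_update(bool in_swapcache, int nr_pages)
{
	/* New rule: one slot per page only for swap-cache folios. */
	return in_swapcache ? nr_pages : 1;
}

int main(void)
{
	/* A 16-page shmem folio: swap-backed, but not in the swap cache. */
	printf("shmem, not in swap cache: %d slot(s)\n",
	       slots_to_update(false, 16));
	/* The same size folio sitting in the swap cache. */
	printf("in swap cache:            %d slot(s)\n",
	       slots_to_update(true, 16));
	return 0;
}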
......
@@ -7004,7 +7004,7 @@ static inline bool has_unaccepted_memory(void)
 
 static bool cond_accept_memory(struct zone *zone, unsigned int order)
 {
-	long to_accept;
+	long to_accept, wmark;
 	bool ret = false;
 
 	if (!has_unaccepted_memory())
@@ -7013,8 +7013,18 @@ static bool cond_accept_memory(struct zone *zone, unsigned int order)
 	if (list_empty(&zone->unaccepted_pages))
 		return false;
 
+	wmark = promo_wmark_pages(zone);
+
+	/*
+	 * Watermarks have not been initialized yet.
+	 *
+	 * Accepting one MAX_ORDER page to ensure progress.
+	 */
+	if (!wmark)
+		return try_to_accept_memory_one(zone);
+
 	/* How much to accept to get to promo watermark? */
-	to_accept = promo_wmark_pages(zone) -
+	to_accept = wmark -
 		    (zone_page_state(zone, NR_FREE_PAGES) -
 		     __zone_watermark_unusable_free(zone, order, 0) -
 		     zone_page_state(zone, NR_UNACCEPTED));
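A standalone sketch of the watermark arithmetic above, using made-up page counts. pages_to_accept() is illustrative, not the kernel function, and its -1 return stands in for the new "accept one MAX_ORDER page to make progress" fallback when watermarks are still zero.

#include <stdio.h>

static long pages_to_accept(long promo_wmark, long free_pages,
			    long unusable_free, long unaccepted)
{
	/* Watermarks not initialized yet: caller should accept one page. */
	if (!promo_wmark)
		return -1;

	/* How much to accept to bring usable free memory up to the watermark? */
	return promo_wmark - (free_pages - unusable_free - unaccepted);
}

int main(void)
{
	/* Example: watermark 1024 pages, 4096 free, 3584 of them unaccepted. */
	printf("to_accept = %ld pages\n", pages_to_accept(1024, 4096, 0, 3584));
	/* Early boot: watermark still 0, fall back to accepting one page. */
	printf("to_accept = %ld (uninitialized watermark)\n",
	       pages_to_accept(0, 4096, 0, 3584));
	return 0;
}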
......
@@ -58,9 +58,11 @@ static unsigned short __swap_cgroup_id_xchg(struct swap_cgroup *map,
  * entries must not have been charged
  *
  * @folio: the folio that the swap entry belongs to
+ * @id: mem_cgroup ID to be recorded
  * @ent: the first swap entry to be recorded
  */
-void swap_cgroup_record(struct folio *folio, swp_entry_t ent)
+void swap_cgroup_record(struct folio *folio, unsigned short id,
+			swp_entry_t ent)
 {
 	unsigned int nr_ents = folio_nr_pages(folio);
 	struct swap_cgroup *map;
@@ -72,8 +74,7 @@ void swap_cgroup_record(struct folio *folio, swp_entry_t ent)
 	map = swap_cgroup_ctrl[swp_type(ent)].map;
 
 	do {
-		old = __swap_cgroup_id_xchg(map, offset,
-					    mem_cgroup_id(folio_memcg(folio)));
+		old = __swap_cgroup_id_xchg(map, offset, id);
 		VM_BUG_ON(old);
 	} while (++offset != end);
 }
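For reference, a tiny userspace model of the recording loop above: the same id is stored for every swap slot a large folio occupies, and each slot is expected to be unrecorded beforehand. The flat slot_id[] array and slot_id_xchg() are simplifications, not the kernel's packed swap_cgroup map or __swap_cgroup_id_xchg().

#include <assert.h>
#include <stdio.h>

#define NR_SLOTS 16

static unsigned short slot_id[NR_SLOTS];

static unsigned short slot_id_xchg(unsigned long offset, unsigned short id)
{
	unsigned short old = slot_id[offset];

	slot_id[offset] = id;
	return old;
}

static void record_range(unsigned long offset, unsigned int nr_ents,
			 unsigned short id)
{
	unsigned long end = offset + nr_ents;

	do {
		unsigned short old = slot_id_xchg(offset, id);

		assert(old == 0);	/* mirrors VM_BUG_ON(old) */
	} while (++offset != end);
}

int main(void)
{
	/* e.g. a 4-page folio starting at swap offset 8, owned by id 7 */
	record_range(8, 4, 7);
	printf("slot 9 records id %d\n", slot_id[9]);
	return 0;
}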
......
@@ -2381,7 +2381,8 @@ static int __mmap_new_vma(struct mmap_state *map, struct vm_area_struct **vmap)
 	 * vma_merge_new_range() calls khugepaged_enter_vma() too, the below
 	 * call covers the non-merge case.
 	 */
-	khugepaged_enter_vma(vma, map->flags);
+	if (!vma_is_anonymous(vma))
+		khugepaged_enter_vma(vma, map->flags);
 	ksm_add_vma(vma);
 	*vmap = vma;
 	return 0;
......
@@ -304,7 +304,9 @@ uffd_stress_bin=./uffd-stress
 CATEGORY="userfaultfd" run_test ${uffd_stress_bin} anon 20 16
 # Hugetlb tests require source and destination huge pages. Pass in half
 # the size of the free pages we have, which is used for *each*.
-half_ufd_size_MB=$((freepgs / 2))
+# uffd-stress expects a region expressed in MiB, so we adjust
+# half_ufd_size_MB accordingly.
+half_ufd_size_MB=$(((freepgs * hpgsize_KB) / 1024 / 2))
 CATEGORY="userfaultfd" run_test ${uffd_stress_bin} hugetlb "$half_ufd_size_MB" 32
 CATEGORY="userfaultfd" run_test ${uffd_stress_bin} hugetlb-private "$half_ufd_size_MB" 32
 CATEGORY="userfaultfd" run_test ${uffd_stress_bin} shmem 20 16
......