The patch titled
Subject: mm/memcg: fix device private memcg accounting
has been added to the -mm tree. Its filename is
mm-memcg-fix-device-private-memcg-accounting.patch
This patch should soon appear at
https://ozlabs.org/~akpm/mmots/broken-out/mm-memcg-fix-device-private-memcg…
and later at
https://ozlabs.org/~akpm/mmotm/broken-out/mm-memcg-fix-device-private-memcg…
Before you just go and hit "reply", please:
a) Consider who else should be cc'ed
b) Prefer to cc a suitable mailing list as well
c) Ideally: find the original patch on the mailing list and do a
reply-to-all to that, adding suitable additional cc's
*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***
The -mm tree is included into linux-next and is updated
there every 3-4 working days
------------------------------------------------------
From: Ralph Campbell <rcampbell@nvidia.com>
Subject: mm/memcg: fix device private memcg accounting
The code in mc_handle_swap_pte() checks for non_swap_entry() and returns
NULL before checking is_device_private_entry(), so device private pages
are never handled. Fix this by checking for non_swap_entry() after
handling device private swap PTEs.
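As an illustration, here is a small stand-alone userspace model of the
ordering bug (the enum and helpers below are hypothetical stand-ins, not
the kernel's swp_entry_t machinery; in this model a device private entry
is the only kind of non-swap entry):

	/* Hypothetical model: a "swap entry" is either a normal swap
	 * entry or a device private entry; non_swap_entry() is true
	 * for the latter, as it is for the kernel's special entries. */
	#include <stdbool.h>
	#include <stdio.h>

	enum entry_type { NORMAL_SWAP, DEVICE_PRIVATE };

	static bool non_swap_entry(enum entry_type e)
	{
		return e != NORMAL_SWAP;
	}

	static bool is_device_private_entry(enum entry_type e)
	{
		return e == DEVICE_PRIVATE;
	}

	/* Old ordering: bails out before the device private check. */
	static const char *handle_buggy(enum entry_type e)
	{
		if (non_swap_entry(e))
			return "ignored";
		if (is_device_private_entry(e))
			return "device page";	/* unreachable */
		return "swap cache lookup";
	}

	/* Fixed ordering: handle device private entries first. */
	static const char *handle_fixed(enum entry_type e)
	{
		if (is_device_private_entry(e))
			return "device page";
		if (non_swap_entry(e))
			return "ignored";
		return "swap cache lookup";
	}

	int main(void)
	{
		printf("buggy: %s, fixed: %s\n",
		       handle_buggy(DEVICE_PRIVATE),
		       handle_fixed(DEVICE_PRIVATE));
		/* prints "buggy: ignored, fixed: device page" */
		return 0;
	}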
Link: https://lkml.kernel.org/r/20201009215952.2726-1-rcampbell@nvidia.com
Fixes: c733a82874a7 ("mm/memcontrol: support MEMORY_DEVICE_PRIVATE")
Signed-off-by: Ralph Campbell <rcampbell@nvidia.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Vladimir Davydov <vdavydov.dev@gmail.com>
Cc: Jerome Glisse <jglisse@redhat.com>
Cc: Balbir Singh <bsingharora@gmail.com>
Cc: Ira Weiny <ira.weiny@intel.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---
mm/memcontrol.c | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
--- a/mm/memcontrol.c~mm-memcg-fix-device-private-memcg-accounting
+++ a/mm/memcontrol.c
@@ -5516,7 +5516,7 @@ static struct page *mc_handle_swap_pte(s
struct page *page = NULL;
swp_entry_t ent = pte_to_swp_entry(ptent);
- if (!(mc.flags & MOVE_ANON) || non_swap_entry(ent))
+ if (!(mc.flags & MOVE_ANON))
return NULL;
/*
@@ -5535,6 +5535,9 @@ static struct page *mc_handle_swap_pte(s
return page;
}
+ if (non_swap_entry(ent))
+ return NULL;
+
/*
* Because lookup_swap_cache() updates some statistics counter,
* we call find_get_page() with swapper_space directly.
_
Patches currently in -mm which might be from rcampbell@nvidia.com are
mm-memcg-fix-device-private-memcg-accounting.patch
mm-test-use-the-new-skip-macro.patch
hmm-test-remove-unused-dmirror_zero_page.patch
mm-move-call-to-compound_head-in-release_pages.patch
mm-migrate-remove-cpages-in-migrate_vma_finalize.patch
mm-migrate-remove-obsolete-comment-about-device-public.patch
The patch titled
Subject: mm: memcg/slab: uncharge during kmem_cache_free_bulk()
has been added to the -mm tree. Its filename is
mm-memcg-slab-uncharge-during-kmem_cache_free_bulk.patch
This patch should soon appear at
https://ozlabs.org/~akpm/mmots/broken-out/mm-memcg-slab-uncharge-during-kme…
and later at
https://ozlabs.org/~akpm/mmotm/broken-out/mm-memcg-slab-uncharge-during-kme…
Before you just go and hit "reply", please:
a) Consider who else should be cc'ed
b) Prefer to cc a suitable mailing list as well
c) Ideally: find the original patch on the mailing list and do a
reply-to-all to that, adding suitable additional cc's
*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***
The -mm tree is included into linux-next and is updated
there every 3-4 working days
------------------------------------------------------
From: Bharata B Rao <bharata@linux.ibm.com>
Subject: mm: memcg/slab: uncharge during kmem_cache_free_bulk()
Object cgroup charging is done for all the objects during allocation, but
during freeing, uncharging ends up happening for only one object in the
case of bulk allocation/freeing.
Fix this by having a separate call to uncharge all the objects from
kmem_cache_free_bulk() and by modifying memcg_slab_free_hook() to take
care of bulk uncharging.
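As an illustration, here is a small stand-alone userspace model of the
accounting leak (the charge counter and helpers are hypothetical
stand-ins for the kernel's obj_cgroup accounting, with malloc() standing
in for the slab allocator):

	/* Hypothetical model: "charged" stands in for the objcg charge
	 * that every allocation takes and every free must give back. */
	#include <stdio.h>
	#include <stdlib.h>

	static long charged;

	static void *alloc_one(void)
	{
		charged++;		/* charging happens per object */
		return malloc(16);
	}

	/* Old behaviour: the free hook uncharges a single object even
	 * when an entire array of objects is being freed. */
	static void free_bulk_buggy(void **p, int objects)
	{
		int i;

		charged--;
		for (i = 0; i < objects; i++)
			free(p[i]);
	}

	/* Fixed behaviour: walk the array, uncharge each object. */
	static void free_bulk_fixed(void **p, int objects)
	{
		int i;

		for (i = 0; i < objects; i++) {
			if (!p[i])	/* bulk arrays may hold NULLs */
				continue;
			charged--;
			free(p[i]);
		}
	}

	int main(void)
	{
		void *objs[4];
		int i;

		for (i = 0; i < 4; i++)
			objs[i] = alloc_one();
		free_bulk_buggy(objs, 4);
		printf("buggy leaked charge: %ld\n", charged);	/* 3 */

		charged = 0;
		for (i = 0; i < 4; i++)
			objs[i] = alloc_one();
		free_bulk_fixed(objs, 4);
		printf("fixed leaked charge: %ld\n", charged);	/* 0 */
		return 0;
	}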
Link: https://lkml.kernel.org/r/20201009060423.390479-1-bharata@linux.ibm.com
Fixes: 964d4bd370d5 ("mm: memcg/slab: save obj_cgroup for non-root slab objects")
Signed-off-by: Bharata B Rao <bharata@linux.ibm.com>
Cc: Roman Gushchin <guro@fb.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Shakeel Butt <shakeelb@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Tejun Heo <tj@kernel.org>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---
mm/slab.c | 2 +-
mm/slab.h | 50 +++++++++++++++++++++++++++++++-------------------
mm/slub.c | 3 ++-
3 files changed, 34 insertions(+), 21 deletions(-)
--- a/mm/slab.c~mm-memcg-slab-uncharge-during-kmem_cache_free_bulk
+++ a/mm/slab.c
@@ -3438,7 +3438,7 @@ void ___cache_free(struct kmem_cache *ca
memset(objp, 0, cachep->object_size);
kmemleak_free_recursive(objp, cachep->flags);
objp = cache_free_debugcheck(cachep, objp, caller);
- memcg_slab_free_hook(cachep, virt_to_head_page(objp), objp);
+ memcg_slab_free_hook(cachep, &objp, 1);
/*
* Skip calling cache_free_alien() when the platform is not numa.
--- a/mm/slab.h~mm-memcg-slab-uncharge-during-kmem_cache_free_bulk
+++ a/mm/slab.h
@@ -345,30 +345,42 @@ static inline void memcg_slab_post_alloc
obj_cgroup_put(objcg);
}
-static inline void memcg_slab_free_hook(struct kmem_cache *s, struct page *page,
- void *p)
+static inline void memcg_slab_free_hook(struct kmem_cache *s_orig,
+ void **p, int objects)
{
+ struct kmem_cache *s;
struct obj_cgroup *objcg;
+ struct page *page;
unsigned int off;
+ int i;
if (!memcg_kmem_enabled())
return;
- if (!page_has_obj_cgroups(page))
- return;
-
- off = obj_to_index(s, page, p);
- objcg = page_obj_cgroups(page)[off];
- page_obj_cgroups(page)[off] = NULL;
-
- if (!objcg)
- return;
-
- obj_cgroup_uncharge(objcg, obj_full_size(s));
- mod_objcg_state(objcg, page_pgdat(page), cache_vmstat_idx(s),
- -obj_full_size(s));
-
- obj_cgroup_put(objcg);
+ for (i = 0; i < objects; i++) {
+ if (unlikely(!p[i]))
+ continue;
+
+ page = virt_to_head_page(p[i]);
+ if (!page_has_obj_cgroups(page))
+ continue;
+
+ if (!s_orig)
+ s = page->slab_cache;
+ else
+ s = s_orig;
+
+ off = obj_to_index(s, page, p[i]);
+ objcg = page_obj_cgroups(page)[off];
+ if (!objcg)
+ continue;
+
+ page_obj_cgroups(page)[off] = NULL;
+ obj_cgroup_uncharge(objcg, obj_full_size(s));
+ mod_objcg_state(objcg, page_pgdat(page), cache_vmstat_idx(s),
+ -obj_full_size(s));
+ obj_cgroup_put(objcg);
+ }
}
#else /* CONFIG_MEMCG_KMEM */
@@ -406,8 +418,8 @@ static inline void memcg_slab_post_alloc
{
}
-static inline void memcg_slab_free_hook(struct kmem_cache *s, struct page *page,
- void *p)
+static inline void memcg_slab_free_hook(struct kmem_cache *s,
+ void **p, int objects)
{
}
#endif /* CONFIG_MEMCG_KMEM */
--- a/mm/slub.c~mm-memcg-slab-uncharge-during-kmem_cache_free_bulk
+++ a/mm/slub.c
@@ -3095,7 +3095,7 @@ static __always_inline void do_slab_free
struct kmem_cache_cpu *c;
unsigned long tid;
- memcg_slab_free_hook(s, page, head);
+ memcg_slab_free_hook(s, &head, 1);
redo:
/*
* Determine the currently cpus per cpu slab.
@@ -3257,6 +3257,7 @@ void kmem_cache_free_bulk(struct kmem_ca
if (WARN_ON(!size))
return;
+ memcg_slab_free_hook(s, p, size);
do {
struct detached_freelist df;
_
Patches currently in -mm which might be from bharata@linux.ibm.com are
mm-memcg-slab-uncharge-during-kmem_cache_free_bulk.patch