	if (!memcg_kmem_enabled())
		return cachep;
	/* __GFP_NOFAIL allocations must succeed, so skip the memcg lookup */
	if (gfp & __GFP_NOFAIL)
		return cachep;
	/* interrupt context and kernel threads have no task memcg to charge */
	if (in_interrupt() || (!current->mm) || (current->flags & PF_KTHREAD))
		return cachep;
	/* the task is dying; don't bother redirecting to a per-memcg cache */
	if (unlikely(fatal_signal_pending(current)))
		return cachep;

	return __memcg_kmem_get_cache(cachep, gfp);
...
void memcg_uncharge_kmem(struct mem_cgroup *memcg, unsigned long nr_pages);

@@ -535,6 +537,13 @@
static __always_inline void memcg_kmem_put_cache(struct kmem_cache *cachep)
{
	if (memcg_kmem_enabled())
		__memcg_kmem_put_cache(cachep);
}

static __always_inline struct mem_cgroup *me...
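Taken together, memcg_kmem_get_cache() and memcg_kmem_put_cache() are meant to
bracket a slab allocation: the former may redirect to a per-memcg child cache
and takes a reference on it, the latter drops that reference. A minimal sketch
of the pairing (the hook names slab_pre_alloc_hook/slab_post_alloc_hook are
borrowed for illustration only; the real hooks in mm/slab.h do more work):

/* Illustrative pairing only; not a hunk from this patch. */
static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
						     gfp_t flags)
{
	/* may return a per-memcg child cache; takes a reference on it */
	return memcg_kmem_get_cache(s, flags);
}

static inline void slab_post_alloc_hook(struct kmem_cache *s)
{
	/* drops the reference taken in slab_pre_alloc_hook() */
	memcg_kmem_put_cache(s);
}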
	BUILD_BUG_ON(IS_ENABLED(CONFIG_VMAP_STACK) && PAGE_SIZE % 1024 != 0);

	BUG_ON(vm->nr_pages != THREAD_SIZE / PAGE_SIZE);

	for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++) {
		/* charge each page of the vmapped kernel stack individually */
		ret = memcg_kmem_charge_page(vm->pages[i], GFP_KERNEL, 0);
		if (ret)
			goto err;
...
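The hunk is cut off at the goto, but the err: label it targets presumably has
to undo any charges already made. A hedged sketch of what that unwind could
look like (the loop below is an assumption for illustration, not the patch's
actual error path):

	return 0;
err:
	/*
	 * Assumed unwind, not taken verbatim from this patch: uncharge
	 * the stack pages that were successfully charged before the
	 * failure, then propagate the error.
	 */
	while (i-- > 0)
		memcg_kmem_uncharge_page(vm->pages[i], 0);
	return ret;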
pointer to the corresponding kmem_cache, and also holds a reference to the kmem_cache. And the kmem_cache itself holds a reference to the cgroup. So there is clearly some redundancy, which makes it possible to stop setting the page->mem_cgroup pointer and instead rely on getting the memcg pointer indirectly via the km...
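Although the text is cut off here, the indirection it describes is easy to
sketch: with page->slab_cache still set for slab pages, and each non-root
kmem_cache holding a reference to its owning cgroup, the memcg can be looked
up without page->mem_cgroup at all. A sketch under those assumptions (the
helper name memcg_from_slab_page and the field memcg_params.memcg follow this
series' conventions, but treat the details as illustrative):

static inline struct mem_cgroup *memcg_from_slab_page(struct page *page)
{
	struct kmem_cache *s;

	/* slab pages keep a pointer to their kmem_cache ... */
	s = READ_ONCE(page->slab_cache);

	/* ... and a non-root kmem_cache holds a reference to its memcg */
	if (s && !is_root_cache(s))
		return s->memcg_params.memcg;

	return NULL;
}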