/*
 * Excerpt (truncated): tail of the page-allocator entry path.
 * First attempts the per-zone freelist fastpath via get_page_from_freelist();
 * on success it jumps straight to 'out'. Otherwise it restores the original
 * gfp mask, clears dirty-page spreading, restores the caller's nodemask and
 * falls back to __alloc_pages_slowpath(). At 'out' a memcg kmem accounting
 * check begins (cut off here).
 * NOTE(review): appears to be from a 5.x-era mm/page_alloc.c — confirm exact
 * kernel version before relying on details.
 */
page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac); if (likely(page)) goto out; alloc_gfp = gfp; ac.spread_dirty_pages = false; ac.nodemask = nodemask; page = __alloc_pages_slowpath(alloc_gfp, order, &ac); out: if (memcg_kmem_enabled() && (gfp & __GFP_AC...
/*
 * Excerpt (truncated): SLAB node-aware object allocation entry.
 * Determines the local memory node via numa_mem_id(), masks the requested
 * flags with gfp_allowed_mask, and runs slab_pre_alloc_hook() before the
 * actual allocation (remainder cut off here).
 */
static __always_inline void * slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid, unsigned long caller) { unsigned long save_flags; void *ptr; int slab_node = numa_mem_id(); /* check parameters and related environment */ flags &= gfp_allowed_mask; cachep = slab_pre_alloc_hook(cache...
/*
 * Excerpt (truncated): SLAB per-CPU cache refill path.
 * Called when the CPU's array_cache is empty; verifies interrupts are off
 * (check_irq_off()), looks up the local node and the per-CPU array via
 * cpu_cache_get(), then refills from the node's slab lists (remainder cut
 * off here; trailing "---..." is an extraction artifact of the quote).
 */
static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags) { int batchcount; struct kmem_cache_node *n; struct array_cache *ac, *shared; int node; void *list = NULL; struct slab *slab; check_irq_off(); node = numa_mem_id(); ac = cpu_cache_get(cachep); ---...
/*
 * Excerpt (truncated): SLUB slab_alloc entry (older kernel: note the
 * CONFIG_CMPXCHG_LOCAL split between a lockless 'tid' path and an
 * irq-flags path). Runs slab_pre_alloc_hook() and bails out with NULL
 * on its failure (remainder cut off here).
 */
static __always_inline void *slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, unsigned long addr) { void **object; struct kmem_cache_cpu *c; #ifdef CONFIG_CMPXCHG_LOCAL unsigned long tid; #else unsigned long flags; #endif if (slab_pre_alloc_hook(s, gfpflags)) return NU...
通过 kmem_cache_alloc -> slab_alloc 的调用链进入以下函数:
static __always_inline void * slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller) { unsigned long save_flags; void *objp; flags &= gfp_allowed_mask; cachep = slab_pre_alloc_hook(cachep, flags); if (unlikely(!cachep)) return...
通过 kmem_cache_alloc、kmem_cache_alloc_node 提供特定类型的内核缓存对象申请。它们最终都会调用到 slab_alloc,所以主要的 slab 操作都在 slab_alloc 函数中。
slab 缓存由两部分组成:保存管理性数据的缓存对象,和保存被管理对象的各个 slab 对象。
slab cache 上图中,"缓存"即为 kmem_cache,"slab"即为 page 页帧,"缓存对象"即为 void 指针。
/*
 * Excerpt (truncated): numbered source listing (original file line numbers
 * 3292+ are part of the quoted text, not this document) of a newer
 * slab_alloc variant — slab_pre_alloc_hook() here additionally takes an
 * obj_cgroup pointer for memcg object accounting (remainder cut off here).
 */
3292 slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller) 3293 { 3294 unsigned long save_flags; 3295 void *objp; 3296 struct obj_cgroup *objcg = NULL; 3297 3298 flags &= gfp_allowed_mask; 3299 cachep = slab_pre_alloc_hook(cachep, &objcg, 1, flags); ...