static __always_inline void *
__do_cache_alloc(struct kmem_cache *cachep, gfp_t flags, int nodeid)
{
	void *objp = NULL;
	int slab_node = numa_mem_id();

	if (nodeid == NUMA_NO_NODE) {
		if (current->mempolicy || cpuset_do_slab_mem_spread()) {
			objp = alternate_node_alloc(cachep, flags);
			if (objp)
				goto out;
		}
		/*
		 * Use the locally cached objects if possible.
		 * However ____cache_alloc does not allow fallback
		 * to other nodes. It may fail while we still have
		 * objects on other nodes available.
		 */
		objp = ____cache_alloc(cachep, flags);
		nodeid = slab_node;
	} else if (nodeid == slab_node) {
		objp = ____cache_alloc(cachep, flags);
	} else if (!get_node(cachep, nodeid)) {
		/* Node not bootstrapped yet */
		objp = fallback_alloc(cachep, flags);
		goto out;
	}

	/*
	 * We may just have run out of memory on the local node.
	 * ____cache_alloc_node() knows how to locate memory on other nodes.
	 */
	if (!objp)
		objp = ____cache_alloc_node(cachep, flags, nodeid);
out:
	return objp;
}
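For orientation, here is a minimal caller sketch that is not taken from mm/slab.c: passing NUMA_NO_NODE lets __do_cache_alloc() fall back to the local node through the first branch above, while an explicit node id takes the local-node or remote-node branches. The cache name, object type, and wrapper functions below are invented for illustration; only the kmem_cache_* calls, GFP_KERNEL, SLAB_HWCACHE_ALIGN, and NUMA_NO_NODE are real kernel API.

/*
 * Hypothetical caller sketch (not from mm/slab.c): how the nodeid argument
 * reaches __do_cache_alloc() through the public slab API.
 */
#include <linux/slab.h>
#include <linux/numa.h>
#include <linux/errno.h>

struct foo {				/* illustrative object type */
	int data[16];
};

static struct kmem_cache *foo_cache;

static int foo_cache_init(void)
{
	foo_cache = kmem_cache_create("foo_cache", sizeof(struct foo),
				      0, SLAB_HWCACHE_ALIGN, NULL);
	if (!foo_cache)
		return -ENOMEM;
	return 0;
}

static struct foo *foo_alloc_local(void)
{
	/* NUMA_NO_NODE: let __do_cache_alloc() pick the local node */
	return kmem_cache_alloc_node(foo_cache, GFP_KERNEL, NUMA_NO_NODE);
}

static struct foo *foo_alloc_on(int nid)
{
	/* explicit node: takes the nodeid == slab_node or remote-node path */
	return kmem_cache_alloc_node(foo_cache, GFP_KERNEL, nid);
}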
	ac = cpu_cache_get(cachep);
	if (likely(ac->avail)) {
		ac->touched = 1;
		objp = ac->entry[--ac->avail];

		STATS_INC_ALLOCHIT(cachep);
		goto out;
	}

	STATS_INC_ALLOCMISS(cachep);
	objp = cache_alloc_refill(cachep, flags);
	/*
	 * the 'ac' may be updated by cache_alloc_refill(),
	 * and kmemleak_erase() requires its correct value.
	 */
	ac = cpu_cache_get(cachep);

out:
	/*
	 * To avoid a false negative, if an object that is in one of the
	 * per-CPU caches is leaked, we need to make sure kmemleak doesn't
	 * treat the array pointers as a reference to the object.
	 */
	if (objp)
		kmemleak_erase(&ac->entry[ac->avail]);
	return objp;
}
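The fast path above is just a per-CPU LIFO stack of object pointers: a hit pops ac->entry[--ac->avail], a miss calls cache_alloc_refill(). The following standalone toy model (userspace C; all names are invented and bear no relation to the kernel's real array_cache beyond the shape) captures that hit/miss split under those assumptions.

/*
 * Toy model of the array_cache fast path (userspace, illustrative only).
 * Compile with: cc -o toy toy.c
 */
#include <stdio.h>
#include <stdlib.h>

#define AC_LIMIT 32

struct toy_array_cache {
	unsigned int avail;		/* objects currently cached */
	unsigned int limit;		/* capacity of entry[] */
	unsigned int touched;		/* recent-activity hint used by refill */
	void *entry[AC_LIMIT];		/* LIFO stack of free objects */
};

/* Slow-path stand-in: fetch a batch of objects from a backing pool. */
static void toy_refill(struct toy_array_cache *ac, size_t objsize, unsigned int batch)
{
	while (ac->avail < batch && ac->avail < ac->limit)
		ac->entry[ac->avail++] = malloc(objsize);
}

/* Fast path: a hit pops the top of the stack, a miss refills first. */
static void *toy_alloc(struct toy_array_cache *ac, size_t objsize)
{
	if (ac->avail) {
		ac->touched = 1;
		return ac->entry[--ac->avail];	/* "ALLOCHIT" */
	}
	toy_refill(ac, objsize, 8);		/* "ALLOCMISS" */
	return ac->avail ? ac->entry[--ac->avail] : NULL;
}

static void toy_free(struct toy_array_cache *ac, void *obj)
{
	if (ac->avail < ac->limit)
		ac->entry[ac->avail++] = obj;	/* push back; LIFO keeps it warm */
	else
		free(obj);			/* cache full: release to the pool */
}

int main(void)
{
	struct toy_array_cache ac = { .limit = AC_LIMIT };
	void *a = toy_alloc(&ac, 64);
	void *b = toy_alloc(&ac, 64);

	toy_free(&ac, a);
	toy_free(&ac, b);
	/* remaining cached objects are leaked at exit; fine for a demo */
	printf("cached objects after free: %u\n", ac.avail);
	return 0;
}

Keeping the stack LIFO means the most recently freed object, which is the one most likely to still be cache-hot, is handed out first.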
	ac = cpu_cache_get(cachep);
	batchcount = ac->batchcount;
	if (!ac->touched && batchcount > BATCHREFILL_LIMIT) {
		/*
		 * If there was little recent activity on this cache, then
		 * perform only a partial refill. Otherwise we could generate
		 * refill bouncing.
		 */
		batchcount = BATCHREFILL_LIMIT;
	}
	n = get_node(cachep, node);
direct_grow:
	if (unlikely(!ac->avail)) {
		/* Check if we can use obj in pfmemalloc slab */
		if (sk_memalloc_socks()) {
			void *obj = cache_alloc_pfmemalloc(cachep, n, flags);

			if (obj)
				return obj;
		}

		slab = cache_grow_begin(cachep, gfp_exact_node(flags), node);

		/*
		 * cache_grow_begin() can reenable interrupts,
		 * then ac could change.
		 */
		ac = cpu_cache_get(cachep);
		if (!ac->avail && slab)
			alloc_block(cachep, ac, slab, batchcount);
		cache_grow_end(cachep, slab);
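cache_alloc_refill() thus pulls up to batchcount objects into the per-CPU array, clamping the batch to BATCHREFILL_LIMIT when the cache has been idle (ac->touched == 0) so that a cold CPU does not drain the node's lists. Below is a hedged standalone sketch of just that transfer policy, with invented names and plain pointer stacks in place of the kernel structures.

/* Standalone sketch of the partial-refill policy (illustrative only). */
#include <stdio.h>

#define TOY_BATCHREFILL_LIMIT 16

struct toy_stack {
	unsigned int avail;
	unsigned int limit;
	void **entry;
};

/*
 * Move up to 'batchcount' free objects from a shared per-node stack into
 * the per-CPU stack; clamp the batch when the CPU cache has been idle.
 */
static unsigned int toy_refill_batch(struct toy_stack *shared,
				     struct toy_stack *ac,
				     unsigned int batchcount,
				     int touched)
{
	unsigned int moved = 0;

	if (!touched && batchcount > TOY_BATCHREFILL_LIMIT)
		batchcount = TOY_BATCHREFILL_LIMIT;	/* partial refill */

	while (moved < batchcount && shared->avail && ac->avail < ac->limit) {
		ac->entry[ac->avail++] = shared->entry[--shared->avail];
		moved++;
	}
	return moved;
}

int main(void)
{
	void *shared_slots[64], *cpu_slots[32];
	struct toy_stack shared = { .avail = 0, .limit = 64, .entry = shared_slots };
	struct toy_stack ac = { .avail = 0, .limit = 32, .entry = cpu_slots };
	unsigned int i, moved;

	for (i = 0; i < 40; i++)	/* pretend 40 free objects sit on the node */
		shared.entry[shared.avail++] = (void *)(long)(i + 1);

	moved = toy_refill_batch(&shared, &ac, 60, /* touched */ 0);
	printf("moved %u objects (clamped to %d)\n", moved, TOY_BATCHREFILL_LIMIT);
	return 0;
}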
	if (is_kfence_address(objp)) {
		kmemleak_free_recursive(objp, cachep->flags);
		__kfence_free(objp);
		return;
	}

	/*
	 * As memory initialization might be integrated into KASAN,
	 * kasan_slab_free and initialization memset must be
	 * kept together to avoid discrepancies in behavior.
	 */
	init = slab_want_init_on_free(cachep);
	if (init && !kasan_has_integrated_init())
		memset(objp, 0, cachep->object_size);
	/* KASAN might put objp into memory quarantine, delaying its reuse. */
	if (kasan_slab_free(cachep, objp, init))
		return;

	/* Use KCSAN to help debug racy use-after-free. */
	if (!(cachep->flags & SLAB_TYPESAFE_BY_RCU))
		__kcsan_check_access(objp, cachep->object_size,
				     KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT);
	/*
	 * Skip calling cache_free_alien() when the platform is not NUMA.
	 * This will avoid cache misses that happen while accessing slabp
	 * (which is a per-page memory reference) to get nodeid. Instead use
	 * a global variable to skip the call, which is most likely to be
	 * present in the cache.
	 */
	if (nr_online_nodes > 1 && cache_free_alien(cachep, objp))
		return;
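Callers never see these stages directly; a plain kmem_cache_free() runs through them in order. A hypothetical caller sketch follows (the wrapper and its name are invented; kmem_cache_free() is the real API):

#include <linux/slab.h>

static void foo_release(struct kmem_cache *cache, void *obj)
{
	/*
	 * On this path the allocator will, in order:
	 *  - divert KFENCE-managed objects to __kfence_free(),
	 *  - zero the object when init_on_free applies, then give KASAN a
	 *    chance to quarantine it,
	 *  - let KCSAN assert exclusive access (skipped for
	 *    SLAB_TYPESAFE_BY_RCU caches),
	 *  - hand "alien" objects back to their home NUMA node instead of
	 *    the local per-CPU array.
	 */
	kmem_cache_free(cache, obj);
}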