Lines matching refs:slab — references to the slab identifier in the SLUB allocator (mm/slub.c). Each entry gives the source line number, the matching code, and the enclosing function; a trailing note marks whether the hit is a struct member, a local variable, or a function argument.
392 struct slab *slab; /* The slab from which we are allocating */ member
394 struct slab *partial; /* Partially allocated slabs */
641 static __always_inline void slab_lock(struct slab *slab) in slab_lock() argument
643 bit_spin_lock(PG_locked, &slab->__page_flags); in slab_lock()
646 static __always_inline void slab_unlock(struct slab *slab) in slab_unlock() argument
648 bit_spin_unlock(PG_locked, &slab->__page_flags); in slab_unlock()
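
The slab_lock()/slab_unlock() hits at 641-648 show the debug/slow path serializing on a bit spinlock taken on PG_locked inside the slab's page flags. A minimal userspace sketch of the same pattern with C11 atomics (the helper names and LOCK_BIT value are illustrative, not the kernel's bit_spin_lock() API):

#include <stdatomic.h>

#define LOCK_BIT 0UL                            /* stands in for PG_locked */

static void flags_bit_lock(atomic_ulong *flags)
{
        /* Spin until we are the thread that flipped the bit from 0 to 1. */
        while (atomic_fetch_or_explicit(flags, 1UL << LOCK_BIT,
                                        memory_order_acquire) & (1UL << LOCK_BIT))
                ;
}

static void flags_bit_unlock(atomic_ulong *flags)
{
        atomic_fetch_and_explicit(flags, ~(1UL << LOCK_BIT),
                                  memory_order_release);
}
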
652 __update_freelist_fast(struct slab *slab, in __update_freelist_fast() argument
660 return try_cmpxchg_freelist(&slab->freelist_counter.full, &old.full, new.full); in __update_freelist_fast()
667 __update_freelist_slow(struct slab *slab, in __update_freelist_slow() argument
673 slab_lock(slab); in __update_freelist_slow()
674 if (slab->freelist == freelist_old && in __update_freelist_slow()
675 slab->counters == counters_old) { in __update_freelist_slow()
676 slab->freelist = freelist_new; in __update_freelist_slow()
677 slab->counters = counters_new; in __update_freelist_slow()
680 slab_unlock(slab); in __update_freelist_slow()
692 static inline bool __slab_update_freelist(struct kmem_cache *s, struct slab *slab, in __slab_update_freelist() argument
703 ret = __update_freelist_fast(slab, freelist_old, counters_old, in __slab_update_freelist()
706 ret = __update_freelist_slow(slab, freelist_old, counters_old, in __slab_update_freelist()
722 static inline bool slab_update_freelist(struct kmem_cache *s, struct slab *slab, in slab_update_freelist() argument
730 ret = __update_freelist_fast(slab, freelist_old, counters_old, in slab_update_freelist()
736 ret = __update_freelist_slow(slab, freelist_old, counters_old, in slab_update_freelist()
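
__update_freelist_fast(), __update_freelist_slow() and the two wrappers at 692/722 describe one technique: the freelist pointer and the packed counters must change as a single unit, preferably with one double-word cmpxchg (try_cmpxchg_freelist() at 660), otherwise by doing the compare and the store by hand under the bit lock (673-680). A simplified model that packs both halves into a single 64-bit atomic so plain C11 atomics suffice; it reuses flags_bit_lock()/flags_bit_unlock() from the sketch above:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

struct freelist_counter {                       /* toy stand-in for the slab fields */
        _Atomic uint64_t full;                  /* high half: freelist, low half: counters */
};

static bool update_freelist_fast(struct freelist_counter *fc,
                                 uint64_t old_full, uint64_t new_full)
{
        /* One CAS covers both halves, so no torn update is possible. */
        return atomic_compare_exchange_strong(&fc->full, &old_full, new_full);
}

static bool update_freelist_slow(struct freelist_counter *fc, atomic_ulong *flags,
                                 uint64_t old_full, uint64_t new_full)
{
        bool ok = false;

        flags_bit_lock(flags);                  /* helper from the bit-spinlock sketch above */
        if (atomic_load_explicit(&fc->full, memory_order_relaxed) == old_full) {
                atomic_store_explicit(&fc->full, new_full, memory_order_relaxed);
                ok = true;
        }
        flags_bit_unlock(flags);
        return ok;
}
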
802 struct slab *slab) in __fill_map() argument
804 void *addr = slab_address(slab); in __fill_map()
807 bitmap_zero(obj_map, slab->objects); in __fill_map()
809 for (p = slab->freelist; p; p = get_freepointer(s, p)) in __fill_map()
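
__fill_map() (802-809) builds a bitmap of free objects by walking the freelist; debug passes then read "bit set" as free and "bit clear" as allocated. A standalone sketch of the same index computation, assuming objects are laid out every 'size' bytes so that index = (p - base) / size:

#include <stdbool.h>
#include <stddef.h>

struct fake_object { struct fake_object *next; };       /* freelist link only */

static void fill_free_map(bool *map, unsigned int objects,
                          const char *base, size_t size,
                          const struct fake_object *freelist)
{
        const struct fake_object *p;
        unsigned int i;

        for (i = 0; i < objects; i++)
                map[i] = false;                          /* assume everything allocated */

        for (p = freelist; p; p = p->next)
                map[((const char *)p - base) / size] = true;     /* mark free */
}
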
900 struct slab *slab, void *object) in check_valid_pointer() argument
907 base = slab_address(slab); in check_valid_pointer()
910 if (object < base || object >= base + slab->objects * s->size || in check_valid_pointer()
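
check_valid_pointer() (900-910) boils down to a range check plus an alignment check: the object must lie inside [base, base + objects * size) and must start exactly on an object boundary. A sketch (NULL is accepted because an empty freelist is legal; the kernel version also accounts for the left redzone offset, which is omitted here):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static bool pointer_is_valid_object(const char *base, unsigned int objects,
                                    size_t size, const void *object)
{
        uintptr_t lo = (uintptr_t)base;
        uintptr_t hi = lo + (uintptr_t)objects * size;
        uintptr_t ptr = (uintptr_t)object;

        if (!object)
                return true;                    /* empty freelist is fine */
        if (ptr < lo || ptr >= hi)
                return false;                   /* outside the slab */
        return ((ptr - lo) % size) == 0;        /* must sit on an object boundary */
}
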
1018 static void print_slab_info(const struct slab *slab) in print_slab_info() argument
1021 slab, slab->objects, slab->inuse, slab->freelist, in print_slab_info()
1022 &slab->__page_flags); in print_slab_info()
1060 static void print_trailer(struct kmem_cache *s, struct slab *slab, u8 *p) in print_trailer() argument
1063 u8 *addr = slab_address(slab); in print_trailer()
1067 print_slab_info(slab); in print_trailer()
1102 static void object_err(struct kmem_cache *s, struct slab *slab, in object_err() argument
1109 print_trailer(s, slab, object); in object_err()
1113 static bool freelist_corrupted(struct kmem_cache *s, struct slab *slab, in freelist_corrupted() argument
1117 !check_valid_pointer(s, slab, nextfree) && freelist) { in freelist_corrupted()
1118 object_err(s, slab, *freelist, "Freechain corrupt"); in freelist_corrupted()
1127 static __printf(3, 4) void slab_err(struct kmem_cache *s, struct slab *slab, in slab_err() argument
1140 print_slab_info(slab); in slab_err()
1193 check_bytes_and_report(struct kmem_cache *s, struct slab *slab, in check_bytes_and_report() argument
1199 u8 *addr = slab_address(slab); in check_bytes_and_report()
1263 static int check_pad_bytes(struct kmem_cache *s, struct slab *slab, u8 *p) in check_pad_bytes() argument
1280 return check_bytes_and_report(s, slab, p, "Object padding", in check_pad_bytes()
1286 slab_pad_check(struct kmem_cache *s, struct slab *slab) in slab_pad_check() argument
1298 start = slab_address(slab); in slab_pad_check()
1299 length = slab_size(slab); in slab_pad_check()
1314 slab_err(s, slab, "Padding overwritten. 0x%p-0x%p @offset=%tu", in slab_pad_check()
1321 static int check_object(struct kmem_cache *s, struct slab *slab, in check_object() argument
1330 if (!check_bytes_and_report(s, slab, object, "Left Redzone", in check_object()
1334 if (!check_bytes_and_report(s, slab, object, "Right Redzone", in check_object()
1342 !check_bytes_and_report(s, slab, object, in check_object()
1350 if (!check_bytes_and_report(s, slab, p, "Alignment padding", in check_object()
1366 !check_bytes_and_report(s, slab, p, "Poison", in check_object()
1371 !check_bytes_and_report(s, slab, p, "End Poison", in check_object()
1378 if (!check_pad_bytes(s, slab, p)) in check_object()
1387 !check_valid_pointer(s, slab, get_freepointer(s, p))) { in check_object()
1388 object_err(s, slab, p, "Freepointer corrupt"); in check_object()
1399 print_trailer(s, slab, object); in check_object()
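
The check_object() and check_bytes_and_report() hits (1193-1399) all lean on one primitive: scan a byte range for the first byte that differs from an expected fill value (redzone, poison, padding) and report where the damage starts. A compact userspace equivalent (the kernel uses memchr_inv() for the scan):

#include <stddef.h>
#include <stdio.h>

/* Return NULL if all bytes equal 'value', else a pointer to the first mismatch. */
static const unsigned char *find_mismatch(const unsigned char *start, size_t len,
                                          unsigned char value)
{
        size_t i;

        for (i = 0; i < len; i++)
                if (start[i] != value)
                        return start + i;
        return NULL;
}

static int check_bytes(const char *what, const unsigned char *start, size_t len,
                       unsigned char value)
{
        const unsigned char *bad = find_mismatch(start, len, value);

        if (!bad)
                return 1;
        fprintf(stderr, "%s overwritten at offset %zu (0x%02x != 0x%02x)\n",
                what, (size_t)(bad - start), *bad, value);
        return 0;
}
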
1406 static int check_slab(struct kmem_cache *s, struct slab *slab) in check_slab() argument
1410 if (!folio_test_slab(slab_folio(slab))) { in check_slab()
1411 slab_err(s, slab, "Not a valid slab page"); in check_slab()
1415 maxobj = order_objects(slab_order(slab), s->size); in check_slab()
1416 if (slab->objects > maxobj) { in check_slab()
1417 slab_err(s, slab, "objects %u > max %u", in check_slab()
1418 slab->objects, maxobj); in check_slab()
1421 if (slab->inuse > slab->objects) { in check_slab()
1422 slab_err(s, slab, "inuse %u > max %u", in check_slab()
1423 slab->inuse, slab->objects); in check_slab()
1427 slab_pad_check(s, slab); in check_slab()
1435 static int on_freelist(struct kmem_cache *s, struct slab *slab, void *search) in on_freelist() argument
1442 fp = slab->freelist; in on_freelist()
1443 while (fp && nr <= slab->objects) { in on_freelist()
1446 if (!check_valid_pointer(s, slab, fp)) { in on_freelist()
1448 object_err(s, slab, object, in on_freelist()
1452 slab_err(s, slab, "Freepointer corrupt"); in on_freelist()
1453 slab->freelist = NULL; in on_freelist()
1454 slab->inuse = slab->objects; in on_freelist()
1465 max_objects = order_objects(slab_order(slab), s->size); in on_freelist()
1469 if (slab->objects != max_objects) { in on_freelist()
1470 slab_err(s, slab, "Wrong number of objects. Found %d but should be %d", in on_freelist()
1471 slab->objects, max_objects); in on_freelist()
1472 slab->objects = max_objects; in on_freelist()
1475 if (slab->inuse != slab->objects - nr) { in on_freelist()
1476 slab_err(s, slab, "Wrong object count. Counter is %d but counted were %d", in on_freelist()
1477 slab->inuse, slab->objects - nr); in on_freelist()
1478 slab->inuse = slab->objects - nr; in on_freelist()
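
on_freelist() (1435-1478) counts objects on the freelist, and the loop bound "nr <= slab->objects" at 1443 is what keeps a corrupted, cyclic chain from hanging the checker; the recovery at 1453-1454 then declares the slab fully in use and cuts the list. A standalone sketch of the bounded walk:

struct fake_object { struct fake_object *next; };

/* Count free objects, but never take more hops than the slab can hold. */
static unsigned int count_free_bounded(const struct fake_object *fp,
                                       unsigned int objects)
{
        unsigned int nr = 0;

        while (fp && nr <= objects) {
                fp = fp->next;
                nr++;
        }
        return nr > objects ? objects : nr;     /* clamp if the chain was corrupt */
}
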
1484 static void trace(struct kmem_cache *s, struct slab *slab, void *object, in trace() argument
1491 object, slab->inuse, in trace()
1492 slab->freelist); in trace()
1506 struct kmem_cache_node *n, struct slab *slab) in add_full() argument
1512 list_add(&slab->slab_list, &n->full); in add_full()
1515 static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct slab *slab) in remove_full() argument
1521 list_del(&slab->slab_list); in remove_full()
1555 void setup_slab_debug(struct kmem_cache *s, struct slab *slab, void *addr) in setup_slab_debug() argument
1561 memset(kasan_reset_tag(addr), POISON_INUSE, slab_size(slab)); in setup_slab_debug()
1566 struct slab *slab, void *object) in alloc_consistency_checks() argument
1568 if (!check_slab(s, slab)) in alloc_consistency_checks()
1571 if (!check_valid_pointer(s, slab, object)) { in alloc_consistency_checks()
1572 object_err(s, slab, object, "Freelist Pointer check fails"); in alloc_consistency_checks()
1576 if (!check_object(s, slab, object, SLUB_RED_INACTIVE)) in alloc_consistency_checks()
1583 struct slab *slab, void *object, int orig_size) in alloc_debug_processing() argument
1586 if (!alloc_consistency_checks(s, slab, object)) in alloc_debug_processing()
1591 trace(s, slab, object, 1); in alloc_debug_processing()
1597 if (folio_test_slab(slab_folio(slab))) { in alloc_debug_processing()
1604 slab->inuse = slab->objects; in alloc_debug_processing()
1605 slab->freelist = NULL; in alloc_debug_processing()
1611 struct slab *slab, void *object, unsigned long addr) in free_consistency_checks() argument
1613 if (!check_valid_pointer(s, slab, object)) { in free_consistency_checks()
1614 slab_err(s, slab, "Invalid object pointer 0x%p", object); in free_consistency_checks()
1618 if (on_freelist(s, slab, object)) { in free_consistency_checks()
1619 object_err(s, slab, object, "Object already free"); in free_consistency_checks()
1623 if (!check_object(s, slab, object, SLUB_RED_ACTIVE)) in free_consistency_checks()
1626 if (unlikely(s != slab->slab_cache)) { in free_consistency_checks()
1627 if (!folio_test_slab(slab_folio(slab))) { in free_consistency_checks()
1628 slab_err(s, slab, "Attempt to free object(0x%p) outside of slab", in free_consistency_checks()
1630 } else if (!slab->slab_cache) { in free_consistency_checks()
1635 object_err(s, slab, object, in free_consistency_checks()
1857 void setup_slab_debug(struct kmem_cache *s, struct slab *slab, void *addr) {} in setup_slab_debug() argument
1860 struct slab *slab, void *object, int orig_size) { return true; } in alloc_debug_processing() argument
1863 struct slab *slab, void *head, void *tail, int *bulk_cnt, in free_debug_processing() argument
1866 static inline void slab_pad_check(struct kmem_cache *s, struct slab *slab) {} in slab_pad_check() argument
1867 static inline int check_object(struct kmem_cache *s, struct slab *slab, in check_object() argument
1873 struct slab *slab) {} in add_full() argument
1875 struct slab *slab) {} in remove_full() argument
1891 static bool freelist_corrupted(struct kmem_cache *s, struct slab *slab, in freelist_corrupted() argument
1906 struct slab *obj_exts_slab; in mark_objexts_empty()
1919 static inline void mark_failed_objexts_alloc(struct slab *slab) in mark_failed_objexts_alloc() argument
1921 slab->obj_exts = OBJEXTS_ALLOC_FAIL; in mark_failed_objexts_alloc()
1943 static inline void mark_failed_objexts_alloc(struct slab *slab) {} in mark_failed_objexts_alloc() argument
1957 int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s, in alloc_slab_obj_exts() argument
1960 unsigned int objects = objs_per_slab(s, slab); in alloc_slab_obj_exts()
1969 slab_nid(slab)); in alloc_slab_obj_exts()
1973 mark_failed_objexts_alloc(slab); in alloc_slab_obj_exts()
1982 old_exts = READ_ONCE(slab->obj_exts); in alloc_slab_obj_exts()
1990 slab->obj_exts = new_exts; in alloc_slab_obj_exts()
1992 cmpxchg(&slab->obj_exts, old_exts, new_exts) != old_exts) { in alloc_slab_obj_exts()
2007 static inline void free_slab_obj_exts(struct slab *slab) in free_slab_obj_exts() argument
2011 obj_exts = slab_obj_exts(slab); in free_slab_obj_exts()
2024 slab->obj_exts = 0; in free_slab_obj_exts()
2041 static int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s, in alloc_slab_obj_exts() argument
2047 static inline void free_slab_obj_exts(struct slab *slab) in free_slab_obj_exts() argument
2063 struct slab *slab; in prepare_slab_obj_exts_hook() local
2074 slab = virt_to_slab(p); in prepare_slab_obj_exts_hook()
2075 if (!slab_obj_exts(slab) && in prepare_slab_obj_exts_hook()
2076 WARN(alloc_slab_obj_exts(slab, s, flags, false), in prepare_slab_obj_exts_hook()
2081 return slab_obj_exts(slab) + obj_to_index(s, slab, p); in prepare_slab_obj_exts_hook()
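
The obj_exts hits (1906-2081) outline a lazily allocated per-slab side array ("object extensions", one slot per object): allocate the vector on first use, publish it with a single cmpxchg on slab->obj_exts so racing allocators agree on one copy, and free the loser's vector. A simplified model with C11 atomics; the element type and helper name are invented for the sketch, and the kernel's extra bookkeeping (for example the OBJEXTS_ALLOC_FAIL marker at 1921) is left out:

#include <stdatomic.h>
#include <stdlib.h>

struct obj_ext { unsigned long tag; };          /* placeholder per-object slot */

struct fake_slab {
        _Atomic(struct obj_ext *) obj_exts;     /* NULL until first needed */
        unsigned int objects;
};

static struct obj_ext *get_or_alloc_obj_exts(struct fake_slab *slab)
{
        struct obj_ext *old = atomic_load(&slab->obj_exts);
        struct obj_ext *new;

        if (old)
                return old;                     /* someone already published a vector */

        new = calloc(slab->objects, sizeof(*new));
        if (!new)
                return NULL;

        /* Publish atomically; if we lose the race, use the winner's vector. */
        if (!atomic_compare_exchange_strong(&slab->obj_exts, &old, new)) {
                free(new);
                return old;                     /* 'old' now holds the winner's pointer */
        }
        return new;
}
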
2102 alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p, in alloc_tagging_slab_free_hook() argument
2115 obj_exts = slab_obj_exts(slab); in alloc_tagging_slab_free_hook()
2120 unsigned int off = obj_to_index(s, slab, p[i]); in alloc_tagging_slab_free_hook()
2134 alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p, in alloc_tagging_slab_free_hook() argument
2170 void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p, in memcg_slab_free_hook() argument
2178 obj_exts = slab_obj_exts(slab); in memcg_slab_free_hook()
2182 __memcg_slab_free_hook(s, slab, p, objects, obj_exts); in memcg_slab_free_hook()
2191 struct slab *slab; in memcg_slab_post_charge() local
2201 slab = folio_slab(folio); in memcg_slab_post_charge()
2202 s = slab->slab_cache; in memcg_slab_post_charge()
2213 slab_exts = slab_obj_exts(slab); in memcg_slab_post_charge()
2215 off = obj_to_index(s, slab, p); in memcg_slab_post_charge()
2232 static inline void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab, in memcg_slab_free_hook() argument
2404 static inline struct slab *alloc_slab_page(gfp_t flags, int node, in alloc_slab_page()
2408 struct slab *slab; in alloc_slab_page() local
2419 slab = folio_slab(folio); in alloc_slab_page()
2424 slab_set_pfmemalloc(slab); in alloc_slab_page()
2426 return slab; in alloc_slab_page()
2493 static bool shuffle_freelist(struct kmem_cache *s, struct slab *slab) in shuffle_freelist() argument
2500 if (slab->objects < 2 || !s->random_seq) in shuffle_freelist()
2506 page_limit = slab->objects * s->size; in shuffle_freelist()
2507 start = fixup_red_left(s, slab_address(slab)); in shuffle_freelist()
2512 slab->freelist = cur; in shuffle_freelist()
2514 for (idx = 1; idx < slab->objects; idx++) { in shuffle_freelist()
2531 static inline bool shuffle_freelist(struct kmem_cache *s, struct slab *slab) in shuffle_freelist() argument
2537 static __always_inline void account_slab(struct slab *slab, int order, in account_slab() argument
2541 alloc_slab_obj_exts(slab, s, gfp, true); in account_slab()
2543 mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s), in account_slab()
2547 static __always_inline void unaccount_slab(struct slab *slab, int order, in unaccount_slab() argument
2551 free_slab_obj_exts(slab); in unaccount_slab()
2553 mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s), in unaccount_slab()
2557 static struct slab *allocate_slab(struct kmem_cache *s, gfp_t flags, int node) in allocate_slab()
2559 struct slab *slab; in allocate_slab() local
2578 slab = alloc_slab_page(alloc_gfp, node, oo); in allocate_slab()
2579 if (unlikely(!slab)) { in allocate_slab()
2586 slab = alloc_slab_page(alloc_gfp, node, oo); in allocate_slab()
2587 if (unlikely(!slab)) in allocate_slab()
2592 slab->objects = oo_objects(oo); in allocate_slab()
2593 slab->inuse = 0; in allocate_slab()
2594 slab->frozen = 0; in allocate_slab()
2596 account_slab(slab, oo_order(oo), s, flags); in allocate_slab()
2598 slab->slab_cache = s; in allocate_slab()
2600 kasan_poison_slab(slab); in allocate_slab()
2602 start = slab_address(slab); in allocate_slab()
2604 setup_slab_debug(s, slab, start); in allocate_slab()
2606 shuffle = shuffle_freelist(s, slab); in allocate_slab()
2611 slab->freelist = start; in allocate_slab()
2612 for (idx = 0, p = start; idx < slab->objects - 1; idx++) { in allocate_slab()
2621 return slab; in allocate_slab()
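
allocate_slab() (2557-2621) finishes by threading the initial freelist through the new memory, either straight through the objects in address order (2611-2612) or, when freelist randomization is enabled, in the order precomputed by shuffle_freelist() (2493-2514). The sequential case is just "store a next pointer at the start of each free object". A sketch, assuming at least one object and keeping the link at offset 0 (the kernel keeps it at s->offset and may obfuscate it when freelist hardening is enabled):

#include <stddef.h>

/* Link 'objects' objects of 'size' bytes, starting at 'start', into a freelist.
 * Assumes objects >= 1. Returns the head of the list. */
static void *init_freelist(char *start, unsigned int objects, size_t size)
{
        unsigned int idx;
        char *p = start;

        for (idx = 0; idx < objects - 1; idx++) {
                *(void **)p = p + size;         /* point at the next object */
                p += size;
        }
        *(void **)p = NULL;                     /* last object terminates the list */
        return start;
}
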
2624 static struct slab *new_slab(struct kmem_cache *s, gfp_t flags, int node) in new_slab()
2635 static void __free_slab(struct kmem_cache *s, struct slab *slab) in __free_slab() argument
2637 struct folio *folio = slab_folio(slab); in __free_slab()
2641 __slab_clear_pfmemalloc(slab); in __free_slab()
2647 unaccount_slab(slab, order, s); in __free_slab()
2653 struct slab *slab = container_of(h, struct slab, rcu_head); in rcu_free_slab() local
2655 __free_slab(slab->slab_cache, slab); in rcu_free_slab()
2658 static void free_slab(struct kmem_cache *s, struct slab *slab) in free_slab() argument
2663 slab_pad_check(s, slab); in free_slab()
2664 for_each_object(p, s, slab_address(slab), slab->objects) in free_slab()
2665 check_object(s, slab, p, SLUB_RED_INACTIVE); in free_slab()
2669 call_rcu(&slab->rcu_head, rcu_free_slab); in free_slab()
2671 __free_slab(s, slab); in free_slab()
2674 static void discard_slab(struct kmem_cache *s, struct slab *slab) in discard_slab() argument
2676 dec_slabs_node(s, slab_nid(slab), slab->objects); in discard_slab()
2677 free_slab(s, slab); in discard_slab()
2684 static inline bool slab_test_node_partial(const struct slab *slab) in slab_test_node_partial() argument
2686 return folio_test_workingset(slab_folio(slab)); in slab_test_node_partial()
2689 static inline void slab_set_node_partial(struct slab *slab) in slab_set_node_partial() argument
2691 set_bit(PG_workingset, folio_flags(slab_folio(slab), 0)); in slab_set_node_partial()
2694 static inline void slab_clear_node_partial(struct slab *slab) in slab_clear_node_partial() argument
2696 clear_bit(PG_workingset, folio_flags(slab_folio(slab), 0)); in slab_clear_node_partial()
2703 __add_partial(struct kmem_cache_node *n, struct slab *slab, int tail) in __add_partial() argument
2707 list_add_tail(&slab->slab_list, &n->partial); in __add_partial()
2709 list_add(&slab->slab_list, &n->partial); in __add_partial()
2710 slab_set_node_partial(slab); in __add_partial()
2714 struct slab *slab, int tail) in add_partial() argument
2717 __add_partial(n, slab, tail); in add_partial()
2721 struct slab *slab) in remove_partial() argument
2724 list_del(&slab->slab_list); in remove_partial()
2725 slab_clear_node_partial(slab); in remove_partial()
2736 struct kmem_cache_node *n, struct slab *slab, int orig_size) in alloc_single_from_partial() argument
2742 object = slab->freelist; in alloc_single_from_partial()
2743 slab->freelist = get_freepointer(s, object); in alloc_single_from_partial()
2744 slab->inuse++; in alloc_single_from_partial()
2746 if (!alloc_debug_processing(s, slab, object, orig_size)) { in alloc_single_from_partial()
2747 remove_partial(n, slab); in alloc_single_from_partial()
2751 if (slab->inuse == slab->objects) { in alloc_single_from_partial()
2752 remove_partial(n, slab); in alloc_single_from_partial()
2753 add_full(s, n, slab); in alloc_single_from_partial()
2765 struct slab *slab, int orig_size) in alloc_single_from_new_slab() argument
2767 int nid = slab_nid(slab); in alloc_single_from_new_slab()
2773 object = slab->freelist; in alloc_single_from_new_slab()
2774 slab->freelist = get_freepointer(s, object); in alloc_single_from_new_slab()
2775 slab->inuse = 1; in alloc_single_from_new_slab()
2777 if (!alloc_debug_processing(s, slab, object, orig_size)) in alloc_single_from_new_slab()
2787 if (slab->inuse == slab->objects) in alloc_single_from_new_slab()
2788 add_full(s, n, slab); in alloc_single_from_new_slab()
2790 add_partial(n, slab, DEACTIVATE_TO_HEAD); in alloc_single_from_new_slab()
2792 inc_slabs_node(s, nid, slab->objects); in alloc_single_from_new_slab()
2799 static void put_cpu_partial(struct kmem_cache *s, struct slab *slab, int drain);
2801 static inline void put_cpu_partial(struct kmem_cache *s, struct slab *slab, in put_cpu_partial() argument
2804 static inline bool pfmemalloc_match(struct slab *slab, gfp_t gfpflags);
2809 static struct slab *get_partial_node(struct kmem_cache *s, in get_partial_node()
2813 struct slab *slab, *slab2, *partial = NULL; in get_partial_node() local
2827 list_for_each_entry_safe(slab, slab2, &n->partial, slab_list) { in get_partial_node()
2828 if (!pfmemalloc_match(slab, pc->flags)) in get_partial_node()
2832 void *object = alloc_single_from_partial(s, n, slab, in get_partial_node()
2835 partial = slab; in get_partial_node()
2842 remove_partial(n, slab); in get_partial_node()
2845 partial = slab; in get_partial_node()
2852 put_cpu_partial(s, slab, 0); in get_partial_node()
2867 static struct slab *get_any_partial(struct kmem_cache *s, in get_any_partial()
2875 struct slab *slab; in get_any_partial() local
2910 slab = get_partial_node(s, n, pc); in get_any_partial()
2911 if (slab) { in get_any_partial()
2919 return slab; in get_any_partial()
2931 static struct slab *get_partial(struct kmem_cache *s, int node, in get_partial()
2934 struct slab *slab; in get_partial() local
2940 slab = get_partial_node(s, get_node(s, searchnode), pc); in get_partial()
2941 if (slab || (node != NUMA_NO_NODE && (pc->flags & __GFP_THISNODE))) in get_partial()
2942 return slab; in get_partial()
3028 static void deactivate_slab(struct kmem_cache *s, struct slab *slab, in deactivate_slab() argument
3031 struct kmem_cache_node *n = get_node(s, slab_nid(slab)); in deactivate_slab()
3036 struct slab new; in deactivate_slab()
3037 struct slab old; in deactivate_slab()
3039 if (READ_ONCE(slab->freelist)) { in deactivate_slab()
3058 if (freelist_corrupted(s, slab, &freelist_iter, nextfree)) in deactivate_slab()
3072 old.freelist = READ_ONCE(slab->freelist); in deactivate_slab()
3073 old.counters = READ_ONCE(slab->counters); in deactivate_slab()
3086 } while (!slab_update_freelist(s, slab, in deactivate_slab()
3096 discard_slab(s, slab); in deactivate_slab()
3100 add_partial(n, slab, tail); in deactivate_slab()
3109 static void __put_partials(struct kmem_cache *s, struct slab *partial_slab) in __put_partials()
3112 struct slab *slab, *slab_to_discard = NULL; in __put_partials() local
3116 slab = partial_slab; in __put_partials()
3117 partial_slab = slab->next; in __put_partials()
3119 n2 = get_node(s, slab_nid(slab)); in __put_partials()
3128 if (unlikely(!slab->inuse && n->nr_partial >= s->min_partial)) { in __put_partials()
3129 slab->next = slab_to_discard; in __put_partials()
3130 slab_to_discard = slab; in __put_partials()
3132 add_partial(n, slab, DEACTIVATE_TO_TAIL); in __put_partials()
3141 slab = slab_to_discard; in __put_partials()
3145 discard_slab(s, slab); in __put_partials()
3155 struct slab *partial_slab; in put_partials()
3170 struct slab *partial_slab; in put_partials_cpu()
3185 static void put_cpu_partial(struct kmem_cache *s, struct slab *slab, int drain) in put_cpu_partial() argument
3187 struct slab *oldslab; in put_cpu_partial()
3188 struct slab *slab_to_put = NULL; in put_cpu_partial()
3212 slab->slabs = slabs; in put_cpu_partial()
3213 slab->next = oldslab; in put_cpu_partial()
3215 this_cpu_write(s->cpu_slab->partial, slab); in put_cpu_partial()
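
put_cpu_partial() (3185-3215) and __put_partials() (3109-3145) describe a small per-CPU cache of partial slabs: push the slab onto a singly linked chain through slab->next, keep the running count in the head's ->slabs, and when the old chain is already full hand the whole thing back to the node partial lists (or to discard_slab() for empty slabs). A reduced single-threaded sketch of the push-with-spill step; the kernel does this under per-CPU locking and a configurable threshold:

#include <stddef.h>

struct fake_slab {
        struct fake_slab *next;                 /* chain of per-CPU partial slabs */
        unsigned int slabs;                     /* chain length, kept in the head */
};

struct cpu_cache {
        struct fake_slab *partial;
        unsigned int max_partial;               /* spill threshold */
};

/* Returns a chain of slabs that must be given back to the node lists, or NULL. */
static struct fake_slab *push_cpu_partial(struct cpu_cache *c, struct fake_slab *slab)
{
        struct fake_slab *old = c->partial;
        struct fake_slab *spill = NULL;
        unsigned int slabs = 0;

        if (old) {
                if (old->slabs >= c->max_partial) {
                        spill = old;            /* chain is full: flush it all */
                        old = NULL;
                } else {
                        slabs = old->slabs;
                }
        }

        slab->slabs = slabs + 1;
        slab->next = old;
        c->partial = slab;
        return spill;
}
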
3236 struct slab *slab; in flush_slab() local
3241 slab = c->slab; in flush_slab()
3244 c->slab = NULL; in flush_slab()
3250 if (slab) { in flush_slab()
3251 deactivate_slab(s, slab, freelist); in flush_slab()
3260 struct slab *slab = c->slab; in __flush_cpu_slab() local
3262 c->slab = NULL; in __flush_cpu_slab()
3266 if (slab) { in __flush_cpu_slab()
3267 deactivate_slab(s, slab, freelist); in __flush_cpu_slab()
3296 if (c->slab) in flush_cpu_slab()
3306 return c->slab || slub_percpu_partial(c); in has_cpu_slab()
3375 static inline int node_match(struct slab *slab, int node) in node_match() argument
3378 if (node != NUMA_NO_NODE && slab_nid(slab) != node) in node_match()
3385 static int count_free(struct slab *slab) in count_free() argument
3387 return slab->objects - slab->inuse; in count_free()
3397 struct slab *slab, void *head, void *tail, int *bulk_cnt, in free_debug_processing() argument
3405 if (!check_slab(s, slab)) in free_debug_processing()
3409 if (slab->inuse < *bulk_cnt) { in free_debug_processing()
3410 slab_err(s, slab, "Slab has %d allocated objects but %d are to be freed\n", in free_debug_processing()
3411 slab->inuse, *bulk_cnt); in free_debug_processing()
3421 if (!free_consistency_checks(s, slab, object, addr)) in free_debug_processing()
3427 trace(s, slab, object, 0); in free_debug_processing()
3440 slab_err(s, slab, "Bulk free expected %d objects but found %d\n", in free_debug_processing()
3456 int (*get_count)(struct slab *)) in count_partial() argument
3460 struct slab *slab; in count_partial() local
3463 list_for_each_entry(slab, &n->partial, slab_list) in count_partial()
3464 x += get_count(slab); in count_partial()
3477 struct slab *slab; in count_partial_free_approx() local
3481 list_for_each_entry(slab, &n->partial, slab_list) in count_partial_free_approx()
3482 x += slab->objects - slab->inuse; in count_partial_free_approx()
3491 list_for_each_entry(slab, &n->partial, slab_list) { in count_partial_free_approx()
3492 x += slab->objects - slab->inuse; in count_partial_free_approx()
3496 list_for_each_entry_reverse(slab, &n->partial, slab_list) { in count_partial_free_approx()
3497 x += slab->objects - slab->inuse; in count_partial_free_approx()
3548 static inline bool pfmemalloc_match(struct slab *slab, gfp_t gfpflags) in pfmemalloc_match() argument
3550 if (unlikely(slab_test_pfmemalloc(slab))) in pfmemalloc_match()
3577 static inline void *get_freelist(struct kmem_cache *s, struct slab *slab) in get_freelist() argument
3579 struct slab new; in get_freelist()
3586 freelist = slab->freelist; in get_freelist()
3587 counters = slab->counters; in get_freelist()
3591 new.inuse = slab->objects; in get_freelist()
3594 } while (!__slab_update_freelist(s, slab, in get_freelist()
3605 static inline void *freeze_slab(struct kmem_cache *s, struct slab *slab) in freeze_slab() argument
3607 struct slab new; in freeze_slab()
3612 freelist = slab->freelist; in freeze_slab()
3613 counters = slab->counters; in freeze_slab()
3618 new.inuse = slab->objects; in freeze_slab()
3621 } while (!slab_update_freelist(s, slab, in freeze_slab()
3652 struct slab *slab; in ___slab_alloc() local
3661 slab = READ_ONCE(c->slab); in ___slab_alloc()
3662 if (!slab) { in ___slab_alloc()
3673 if (unlikely(!node_match(slab, node))) { in ___slab_alloc()
3691 if (unlikely(!pfmemalloc_match(slab, gfpflags))) in ___slab_alloc()
3696 if (unlikely(slab != c->slab)) { in ___slab_alloc()
3704 freelist = get_freelist(s, slab); in ___slab_alloc()
3707 c->slab = NULL; in ___slab_alloc()
3725 VM_BUG_ON(!c->slab->frozen); in ___slab_alloc()
3734 if (slab != c->slab) { in ___slab_alloc()
3739 c->slab = NULL; in ___slab_alloc()
3743 deactivate_slab(s, slab, freelist); in ___slab_alloc()
3750 if (unlikely(c->slab)) { in ___slab_alloc()
3760 slab = slub_percpu_partial(c); in ___slab_alloc()
3761 slub_set_percpu_partial(c, slab); in ___slab_alloc()
3763 if (likely(node_match(slab, node) && in ___slab_alloc()
3764 pfmemalloc_match(slab, gfpflags))) { in ___slab_alloc()
3765 c->slab = slab; in ___slab_alloc()
3766 freelist = get_freelist(s, slab); in ___slab_alloc()
3774 slab->next = NULL; in ___slab_alloc()
3775 __put_partials(s, slab); in ___slab_alloc()
3798 slab = get_partial(s, node, &pc); in ___slab_alloc()
3799 if (slab) { in ___slab_alloc()
3813 freelist = freeze_slab(s, slab); in ___slab_alloc()
3818 slab = new_slab(s, pc.flags, node); in ___slab_alloc()
3821 if (unlikely(!slab)) { in ___slab_alloc()
3834 freelist = alloc_single_from_new_slab(s, slab, orig_size); in ___slab_alloc()
3849 freelist = slab->freelist; in ___slab_alloc()
3850 slab->freelist = NULL; in ___slab_alloc()
3851 slab->inuse = slab->objects; in ___slab_alloc()
3852 slab->frozen = 1; in ___slab_alloc()
3854 inc_slabs_node(s, slab_nid(slab), slab->objects); in ___slab_alloc()
3856 if (unlikely(!pfmemalloc_match(slab, gfpflags))) { in ___slab_alloc()
3861 deactivate_slab(s, slab, get_freepointer(s, freelist)); in ___slab_alloc()
3868 if (unlikely(c->slab)) { in ___slab_alloc()
3870 struct slab *flush_slab = c->slab; in ___slab_alloc()
3872 c->slab = NULL; in ___slab_alloc()
3884 c->slab = slab; in ___slab_alloc()
3919 struct slab *slab; in __slab_alloc_node() local
3957 slab = c->slab; in __slab_alloc_node()
3960 unlikely(!object || !slab || !node_match(slab, node))) { in __slab_alloc_node()
3994 struct slab *slab; in __slab_alloc_node() local
3999 slab = get_partial(s, node, &pc); in __slab_alloc_node()
4001 if (slab) in __slab_alloc_node()
4004 slab = new_slab(s, gfpflags, node); in __slab_alloc_node()
4005 if (unlikely(!slab)) { in __slab_alloc_node()
4010 object = alloc_single_from_new_slab(s, slab, orig_size); in __slab_alloc_node()
4313 struct kmem_cache *s, struct slab *slab, in free_to_partial_list() argument
4317 struct kmem_cache_node *n = get_node(s, slab_nid(slab)); in free_to_partial_list()
4318 struct slab *slab_free = NULL; in free_to_partial_list()
4328 if (free_debug_processing(s, slab, head, tail, &cnt, addr, handle)) { in free_to_partial_list()
4329 void *prior = slab->freelist; in free_to_partial_list()
4332 slab->inuse -= cnt; in free_to_partial_list()
4334 slab->freelist = head; in free_to_partial_list()
4341 if (slab->inuse == 0 && n->nr_partial >= s->min_partial) in free_to_partial_list()
4342 slab_free = slab; in free_to_partial_list()
4346 remove_full(s, n, slab); in free_to_partial_list()
4348 add_partial(n, slab, DEACTIVATE_TO_TAIL); in free_to_partial_list()
4352 remove_partial(n, slab); in free_to_partial_list()
4381 static void __slab_free(struct kmem_cache *s, struct slab *slab, in __slab_free() argument
4388 struct slab new; in __slab_free()
4397 free_to_partial_list(s, slab, head, tail, cnt, addr); in __slab_free()
4406 prior = slab->freelist; in __slab_free()
4407 counters = slab->counters; in __slab_free()
4416 n = get_node(s, slab_nid(slab)); in __slab_free()
4427 on_node_partial = slab_test_node_partial(slab); in __slab_free()
4431 } while (!slab_update_freelist(s, slab, in __slab_free()
4449 put_cpu_partial(s, slab, 1); in __slab_free()
4473 add_partial(n, slab, DEACTIVATE_TO_TAIL); in __slab_free()
4484 remove_partial(n, slab); in __slab_free()
4490 discard_slab(s, slab); in __slab_free()
4510 struct slab *slab, void *head, void *tail, in do_slab_free() argument
4530 if (unlikely(slab != c->slab)) { in do_slab_free()
4531 __slab_free(s, slab, head, tail, cnt, addr); in do_slab_free()
4548 if (unlikely(slab != c->slab)) { in do_slab_free()
4565 struct slab *slab, void *head, void *tail, in do_slab_free() argument
4568 __slab_free(s, slab, head, tail, cnt, addr); in do_slab_free()
4573 void slab_free(struct kmem_cache *s, struct slab *slab, void *object, in slab_free() argument
4576 memcg_slab_free_hook(s, slab, &object, 1); in slab_free()
4577 alloc_tagging_slab_free_hook(s, slab, &object, 1); in slab_free()
4580 do_slab_free(s, slab, object, object, 1, addr); in slab_free()
4594 void slab_free_bulk(struct kmem_cache *s, struct slab *slab, void *head, in slab_free_bulk() argument
4597 memcg_slab_free_hook(s, slab, p, cnt); in slab_free_bulk()
4598 alloc_tagging_slab_free_hook(s, slab, p, cnt); in slab_free_bulk()
4604 do_slab_free(s, slab, head, tail, cnt, addr); in slab_free_bulk()
4613 struct slab *slab = virt_to_slab(object); in slab_free_after_rcu_debug() local
4622 if (WARN_ON(!slab)) in slab_free_after_rcu_debug()
4624 s = slab->slab_cache; in slab_free_after_rcu_debug()
4630 do_slab_free(s, slab, object, object, 1, _THIS_IP_); in slab_free_after_rcu_debug()
4643 struct slab *slab; in virt_to_cache() local
4645 slab = virt_to_slab(obj); in virt_to_cache()
4646 if (WARN_ONCE(!slab, "%s: Object is not a Slab page!\n", __func__)) in virt_to_cache()
4648 return slab->slab_cache; in virt_to_cache()
4710 struct slab *slab; in kfree() local
4725 slab = folio_slab(folio); in kfree()
4726 s = slab->slab_cache; in kfree()
4727 slab_free(s, slab, x, _RET_IP_); in kfree()
4732 struct slab *slab; member
4766 df->slab = NULL; in build_detached_freelist()
4770 df->slab = folio_slab(folio); in build_detached_freelist()
4771 df->s = df->slab->slab_cache; in build_detached_freelist()
4773 df->slab = folio_slab(folio); in build_detached_freelist()
4791 if (df->slab == virt_to_slab(object)) { in build_detached_freelist()
4823 if (!df.slab) in __kmem_cache_free_bulk()
4829 do_slab_free(df.s, df.slab, df.freelist, df.tail, df.cnt, in __kmem_cache_free_bulk()
4844 if (!df.slab) in kmem_cache_free_bulk()
4847 slab_free_bulk(df.s, df.slab, df.freelist, df.tail, &p[size], in kmem_cache_free_bulk()
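
The bulk-free hits (4732-4847) revolve around a "detached freelist": pick the slab that owns the last pointer in the array, chain every object from that same slab into one freelist, and free the chain with a single call, leaving objects from other slabs for the next pass. A simplified scan in that spirit; owner_of() is a stand-in for virt_to_slab(), the toy model treats every 4 KiB block as one slab, and the freelist link is kept at offset 0:

#include <stddef.h>
#include <stdint.h>

struct fake_slab;                               /* opaque in this model */

static struct fake_slab *owner_of(void *object)
{
        /* Toy model: every 4 KiB block is "one slab". */
        return (struct fake_slab *)((uintptr_t)object & ~(uintptr_t)0xfff);
}

struct detached_freelist {
        struct fake_slab *slab;
        void *freelist;                         /* head of the chain */
        void *tail;                             /* last link (first object taken) */
        size_t cnt;
};

/* Pull every object owned by the same slab as p[size-1] out of p[], chain them
 * through their first word, and compact the leftovers to the front of p[].
 * Returns how many leftover objects remain in p[]. */
static size_t build_chain(void **p, size_t size, struct detached_freelist *df)
{
        size_t out = 0, i;

        df->slab = NULL;
        df->freelist = df->tail = NULL;
        df->cnt = 0;
        if (!size)
                return 0;

        df->slab = owner_of(p[size - 1]);

        for (i = 0; i < size; i++) {
                if (owner_of(p[i]) == df->slab) {
                        *(void **)p[i] = df->freelist;  /* push onto the chain */
                        df->freelist = p[i];
                        if (!df->tail)
                                df->tail = p[i];
                        df->cnt++;
                } else {
                        p[out++] = p[i];                /* keep for the next pass */
                }
        }
        return out;
}
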
5170 struct slab *slab; in early_kmem_cache_node_alloc() local
5175 slab = new_slab(kmem_cache_node, GFP_NOWAIT, node); in early_kmem_cache_node_alloc()
5177 BUG_ON(!slab); in early_kmem_cache_node_alloc()
5178 if (slab_nid(slab) != node) { in early_kmem_cache_node_alloc()
5183 n = slab->freelist; in early_kmem_cache_node_alloc()
5189 slab->freelist = get_freepointer(kmem_cache_node, n); in early_kmem_cache_node_alloc()
5190 slab->inuse = 1; in early_kmem_cache_node_alloc()
5193 inc_slabs_node(kmem_cache_node, node, slab->objects); in early_kmem_cache_node_alloc()
5199 __add_partial(n, slab, DEACTIVATE_TO_HEAD); in early_kmem_cache_node_alloc()
5423 static void list_slab_objects(struct kmem_cache *s, struct slab *slab, in list_slab_objects() argument
5427 void *addr = slab_address(slab); in list_slab_objects()
5430 slab_err(s, slab, text, s->name); in list_slab_objects()
5433 __fill_map(object_map, s, slab); in list_slab_objects()
5435 for_each_object(p, s, addr, slab->objects) { in list_slab_objects()
5456 struct slab *slab, *h; in free_partial() local
5460 list_for_each_entry_safe(slab, h, &n->partial, slab_list) { in free_partial()
5461 if (!slab->inuse) { in free_partial()
5462 remove_partial(n, slab); in free_partial()
5463 list_add(&slab->slab_list, &discard); in free_partial()
5465 list_slab_objects(s, slab, in free_partial()
5471 list_for_each_entry_safe(slab, h, &discard, slab_list) in free_partial()
5472 discard_slab(s, slab); in free_partial()
5505 void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab) in __kmem_obj_info() argument
5512 struct kmem_cache *s = slab->slab_cache; in __kmem_obj_info()
5516 kpp->kp_slab = slab; in __kmem_obj_info()
5518 base = slab_address(slab); in __kmem_obj_info()
5525 objnr = obj_to_index(s, slab, objp); in __kmem_obj_info()
5529 if (WARN_ON_ONCE(objp < base || objp >= base + slab->objects * s->size in __kmem_obj_info()
5615 const struct slab *slab, bool to_user) in __check_heap_object() argument
5624 s = slab->slab_cache; in __check_heap_object()
5627 if (ptr < slab_address(slab)) in __check_heap_object()
5635 offset = (ptr - slab_address(slab)) % s->size; in __check_heap_object()
5671 struct slab *slab; in __kmem_cache_do_shrink() local
5672 struct slab *t; in __kmem_cache_do_shrink()
5691 list_for_each_entry_safe(slab, t, &n->partial, slab_list) { in __kmem_cache_do_shrink()
5692 int free = slab->objects - slab->inuse; in __kmem_cache_do_shrink()
5700 if (free == slab->objects) { in __kmem_cache_do_shrink()
5701 list_move(&slab->slab_list, &discard); in __kmem_cache_do_shrink()
5702 slab_clear_node_partial(slab); in __kmem_cache_do_shrink()
5704 dec_slabs_node(s, node, slab->objects); in __kmem_cache_do_shrink()
5706 list_move(&slab->slab_list, promote + free - 1); in __kmem_cache_do_shrink()
5719 list_for_each_entry_safe(slab, t, &discard, slab_list) in __kmem_cache_do_shrink()
5720 free_slab(s, slab); in __kmem_cache_do_shrink()
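
__kmem_cache_do_shrink() (5671-5720) does a small bucket sort of a node's partial slabs by free-object count: completely free slabs move to a discard list, the rest are re-queued so the fullest slabs end up at the head of the partial list and are consumed first. A reduced single-threaded model over a plain singly linked list (the kernel uses list_head lists, leaves slabs with more than SHRINK_PROMOTE_MAX free objects in place, and holds the node's list_lock):

#include <stddef.h>

#define PROMOTE_MAX 32                          /* plays the role of SHRINK_PROMOTE_MAX */

struct fake_slab {
        struct fake_slab *next;
        unsigned int objects;
        unsigned int inuse;
};

/* Split '*partial' into a discard chain (fully free slabs) and promote buckets
 * keyed by free count, then rebuild the partial list fullest-first.
 * Returns the discard chain for the caller to free. */
static struct fake_slab *shrink_partial(struct fake_slab **partial)
{
        struct fake_slab *discard = NULL, *rebuilt = NULL;
        struct fake_slab *promote[PROMOTE_MAX] = { 0 };
        struct fake_slab *slab, *next;
        int i;

        for (slab = *partial; slab; slab = next) {
                unsigned int free = slab->objects - slab->inuse;

                next = slab->next;
                if (free == slab->objects) {
                        slab->next = discard;           /* nothing in use: discard */
                        discard = slab;
                } else {
                        /* free >= 1 here: full slabs are never kept on the partial list */
                        i = free < PROMOTE_MAX ? (int)free - 1 : PROMOTE_MAX - 1;
                        slab->next = promote[i];
                        promote[i] = slab;
                }
        }

        /* Push most-free buckets first so the fullest slabs end up at the head. */
        for (i = PROMOTE_MAX - 1; i >= 0; i--) {
                for (slab = promote[i]; slab; slab = next) {
                        next = slab->next;
                        slab->next = rebuilt;
                        rebuilt = slab;
                }
        }

        *partial = rebuilt;
        return discard;
}
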
5876 struct slab *p; in bootstrap()
6065 static int count_inuse(struct slab *slab) in count_inuse() argument
6067 return slab->inuse; in count_inuse()
6070 static int count_total(struct slab *slab) in count_total() argument
6072 return slab->objects; in count_total()
6077 static void validate_slab(struct kmem_cache *s, struct slab *slab, in validate_slab() argument
6081 void *addr = slab_address(slab); in validate_slab()
6083 if (!check_slab(s, slab) || !on_freelist(s, slab, NULL)) in validate_slab()
6087 __fill_map(obj_map, s, slab); in validate_slab()
6088 for_each_object(p, s, addr, slab->objects) { in validate_slab()
6092 if (!check_object(s, slab, p, val)) in validate_slab()
6101 struct slab *slab; in validate_slab_node() local
6106 list_for_each_entry(slab, &n->partial, slab_list) { in validate_slab_node()
6107 validate_slab(s, slab, obj_map); in validate_slab_node()
6119 list_for_each_entry(slab, &n->full, slab_list) { in validate_slab_node()
6120 validate_slab(s, slab, obj_map); in validate_slab_node()
6304 struct slab *slab, enum track_item alloc, in process_slab() argument
6307 void *addr = slab_address(slab); in process_slab()
6311 __fill_map(obj_map, s, slab); in process_slab()
6313 for_each_object(p, s, addr, slab->objects) in process_slab()
6357 struct slab *slab; in show_slab_objects() local
6359 slab = READ_ONCE(c->slab); in show_slab_objects()
6360 if (!slab) in show_slab_objects()
6363 node = slab_nid(slab); in show_slab_objects()
6365 x = slab->objects; in show_slab_objects()
6367 x = slab->inuse; in show_slab_objects()
6375 slab = slub_percpu_partial_read_once(c); in show_slab_objects()
6376 if (slab) { in show_slab_objects()
6377 node = slab_nid(slab); in show_slab_objects()
6383 x = data_race(slab->slabs); in show_slab_objects()
6583 struct slab *slab; in slabs_cpu_partial_show() local
6585 slab = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu)); in slabs_cpu_partial_show()
6587 if (slab) in slabs_cpu_partial_show()
6588 slabs += data_race(slab->slabs); in slabs_cpu_partial_show()
6598 struct slab *slab; in slabs_cpu_partial_show() local
6600 slab = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu)); in slabs_cpu_partial_show()
6601 if (slab) { in slabs_cpu_partial_show()
6602 slabs = data_race(slab->slabs); in slabs_cpu_partial_show()
7348 struct slab *slab; in slab_debug_trace_open() local
7354 list_for_each_entry(slab, &n->partial, slab_list) in slab_debug_trace_open()
7355 process_slab(t, s, slab, alloc, obj_map); in slab_debug_trace_open()
7356 list_for_each_entry(slab, &n->full, slab_list) in slab_debug_trace_open()
7357 process_slab(t, s, slab, alloc, obj_map); in slab_debug_trace_open()