1 // SPDX-License-Identifier: GPL-2.0
36 #include <linux/fault-inject.h>
43 #include <kunit/test-bug.h>
54 * 2. node->list_lock (Spinlock)
55 * 3. kmem_cache->cpu_slab->lock (Local lock)
73 * A. slab->freelist -> List of free objects in a slab
74 * B. slab->inuse -> Number of objects in use
75 * C. slab->objects -> Number of objects in slab
76 * D. slab->frozen -> frozen state
97 * - node partial slab: PG_Workingset && !frozen
98 * - cpu partial slab: !PG_Workingset && !frozen
99 * - cpu slab: !PG_Workingset && frozen
100 * - full slab: !PG_Workingset && !frozen
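
The state table above maps two bits onto a slab's role: the PG_Workingset folio flag and the slab->frozen bit. A minimal userspace sketch of that mapping (not kernel code; the helper name and the string labels are invented for illustration):

        #include <stdio.h>
        #include <stdbool.h>

        /* on_node_partial_list stands in for PG_Workingset in the table above. */
        static const char *slab_role(bool on_node_partial_list, bool frozen)
        {
                if (on_node_partial_list)
                        return frozen ? "unexpected (not in the table)" : "node partial slab";
                /* !PG_Workingset && !frozen covers both cpu-partial and full slabs;
                 * flags alone do not tell them apart. */
                return frozen ? "cpu slab" : "cpu partial slab or full slab";
        }

        int main(void)
        {
                printf("%s\n", slab_role(true, false));   /* node partial slab */
                printf("%s\n", slab_role(false, true));   /* cpu slab */
                printf("%s\n", slab_role(false, false));  /* cpu partial or full */
                return 0;
        }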
119 * cpu_slab->lock local lock
128 * an in-progress slow path operations. In this case the local lock is always
164 * slab->frozen The slab is frozen and exempt from list processing.
236 (s->flags & SLAB_KMALLOC)); in slub_debug_orig_size()
242 p += s->red_left_pad; in fixup_red_left()
259 * - Support PAGE_ALLOC_DEBUG. Should be easy to do.
261 * - Variable sizing of the per node arrays
304 #define OO_MASK ((1 << OO_SHIFT) - 1)
408 * avoid this_cpu_add()'s irq-disable overhead. in stat()
410 raw_cpu_inc(s->cpu_slab->stat[si]); in stat()
418 raw_cpu_add(s->cpu_slab->stat[si], v); in stat_add()
438 return s->node[node]; in get_node()
470 * with an XOR of the address where the pointer is held and a per-cache
479 encoded = (unsigned long)ptr ^ s->random ^ swab(ptr_addr); in freelist_ptr_encode()
492 decoded = (void *)(ptr.v ^ s->random ^ swab(ptr_addr)); in freelist_ptr_decode()
505 ptr_addr = (unsigned long)object + s->offset; in get_freepointer()
513 prefetchw(object + s->offset); in prefetch_freepointer()
537 freepointer_addr = (unsigned long)object + s->offset; in get_freepointer_safe()
544 unsigned long freeptr_addr = (unsigned long)object + s->offset; in set_freepointer()
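
The freelist_ptr_encode()/freelist_ptr_decode() fragments above show the CONFIG_SLAB_FREELIST_HARDENED obfuscation: the stored free pointer is XORed with a per-cache random value and the byte-swapped address of the slot that holds it. A small userspace sketch of that round trip (all values are made up, and __builtin_bswap64 stands in for the kernel's swab()):

        #include <stdio.h>
        #include <stdint.h>

        static uint64_t swab64(uint64_t x) { return __builtin_bswap64(x); }

        static uint64_t encode(uint64_t ptr, uint64_t cache_random, uint64_t ptr_addr)
        {
                return ptr ^ cache_random ^ swab64(ptr_addr);
        }

        static uint64_t decode(uint64_t stored, uint64_t cache_random, uint64_t ptr_addr)
        {
                /* XOR is its own inverse, so decode mirrors encode exactly. */
                return stored ^ cache_random ^ swab64(ptr_addr);
        }

        int main(void)
        {
                uint64_t random = 0x5eedf00ddeadbeefULL;  /* stands in for s->random */
                uint64_t slot   = 0xffff888012345678ULL;  /* address of the freepointer slot */
                uint64_t next   = 0xffff888012345800ULL;  /* next free object */

                uint64_t stored = encode(next, random, slot);
                printf("stored  %#llx\n", (unsigned long long)stored);
                printf("decoded %#llx (matches: %d)\n",
                       (unsigned long long)decode(stored, random, slot),
                       decode(stored, random, slot) == next);
                return 0;
        }

Because XOR is self-inverse, decoding is the same operation as encoding; folding in the byte-swapped slot address means a captured value is only meaningful at the location it was stored in.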
559 return s->offset >= s->inuse; in freeptr_outside_object()
569 return s->inuse + sizeof(void *); in get_info_end()
571 return s->inuse; in get_info_end()
577 __p < (__addr) + (__objects) * (__s)->size; \
578 __p += (__s)->size)
610 s->cpu_partial = nr_objects; in slub_set_cpu_partial()
616 * be half-full. in slub_set_cpu_partial()
618 nr_slabs = DIV_ROUND_UP(nr_objects * 2, oo_objects(s->oo)); in slub_set_cpu_partial()
619 s->cpu_partial_slabs = nr_slabs; in slub_set_cpu_partial()
624 return s->cpu_partial_slabs; in slub_get_cpu_partial()
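
slub_set_cpu_partial() above converts an object cap into a slab cap on the assumption that the cached partial slabs are roughly half full. A quick worked check with made-up numbers:

        #include <stdio.h>

        #define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

        int main(void)
        {
                /* Assumed values: cap of 30 objects, 16 objects per slab. */
                unsigned int nr_objects = 30, objs_per_slab = 16;

                /* DIV_ROUND_UP(30 * 2, 16) = 4 slabs kept on the per-cpu partial list. */
                printf("cpu_partial_slabs = %u\n",
                       DIV_ROUND_UP(nr_objects * 2, objs_per_slab));
                return 0;
        }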
643 bit_spin_lock(PG_locked, &slab->__page_flags); in slab_lock()
648 bit_spin_unlock(PG_locked, &slab->__page_flags); in slab_unlock()
660 return try_cmpxchg_freelist(&slab->freelist_counter.full, &old.full, new.full); in __update_freelist_fast()
674 if (slab->freelist == freelist_old && in __update_freelist_slow()
675 slab->counters == counters_old) { in __update_freelist_slow()
676 slab->freelist = freelist_new; in __update_freelist_slow()
677 slab->counters = counters_new; in __update_freelist_slow()
702 if (s->flags & __CMPXCHG_DOUBLE) { in __slab_update_freelist()
716 pr_info("%s %s: cmpxchg double redo ", n, s->name); in __slab_update_freelist()
729 if (s->flags & __CMPXCHG_DOUBLE) { in slab_update_freelist()
747 pr_info("%s %s: cmpxchg double redo ", n, s->name); in slab_update_freelist()
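
The __update_freelist_fast()/__update_freelist_slow() fragments above update a slab's freelist head and its packed counters word as one unit: either with a double-width cmpxchg when __CMPXCHG_DOUBLE is usable, or by comparing and rewriting both fields under slab_lock(). A stripped-down userspace model of the compare-both/update-both step (struct and function names are invented, and no locking is modelled here):

        #include <stdbool.h>
        #include <stdio.h>

        struct toy_slab {
                void *freelist;
                unsigned long counters;
        };

        /* In the kernel this step runs either under slab_lock() (the slow path
         * shown above) or as a single double-width cmpxchg of the
         * {freelist, counters} pair when __CMPXCHG_DOUBLE is set. */
        static bool update_freelist(struct toy_slab *slab,
                                    void *freelist_old, unsigned long counters_old,
                                    void *freelist_new, unsigned long counters_new)
        {
                if (slab->freelist == freelist_old && slab->counters == counters_old) {
                        slab->freelist = freelist_new;
                        slab->counters = counters_new;
                        return true;
                }
                return false;
        }

        int main(void)
        {
                int obj;
                struct toy_slab s = { &obj, 3 };

                printf("matching update: %d\n", update_freelist(&s, &obj, 3, NULL, 4));
                printf("stale update:    %d\n", update_freelist(&s, &obj, 3, NULL, 5));
                return 0;
        }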
789 return s->object_size; in get_orig_size()
807 bitmap_zero(obj_map, slab->objects); in __fill_map()
809 for (p = slab->freelist; p; p = get_freepointer(s, p)) in __fill_map()
821 resource = kunit_find_named_resource(current->kunit_test, "slab_errors"); in slab_add_kunit_errors()
825 (*(int *)resource->data)++; in slab_add_kunit_errors()
837 resource = kunit_find_named_resource(current->kunit_test, "slab_errors"); in slab_in_kunit_test()
850 if (s->flags & SLAB_RED_ZONE) in size_from_object()
851 return s->size - s->red_left_pad; in size_from_object()
853 return s->size; in size_from_object()
858 if (s->flags & SLAB_RED_ZONE) in restore_red_left()
859 p -= s->red_left_pad; in restore_red_left()
910 if (object < base || object >= base + slab->objects * s->size || in check_valid_pointer()
911 (object - base) % s->size) { in check_valid_pointer()
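
check_valid_pointer() above accepts an object address only if it lies inside the slab's object area and sits an exact multiple of s->size from the base. A userspace model with assumed numbers:

        #include <stdbool.h>
        #include <stdio.h>
        #include <stdint.h>

        static bool valid_object(uintptr_t base, unsigned int nr_objects,
                                 unsigned int size, uintptr_t object)
        {
                if (object < base || object >= base + (uintptr_t)nr_objects * size)
                        return false;
                return (object - base) % size == 0;
        }

        int main(void)
        {
                uintptr_t base = 0x1000;

                printf("%d\n", valid_object(base, 8, 64, base + 128));    /* 1: object #2 */
                printf("%d\n", valid_object(base, 8, 64, base + 100));    /* 0: misaligned */
                printf("%d\n", valid_object(base, 8, 64, base + 8 * 64)); /* 0: past the end */
                return 0;
        }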
963 p->handle = handle; in set_track_update()
965 p->addr = addr; in set_track_update()
966 p->cpu = smp_processor_id(); in set_track_update()
967 p->pid = current->pid; in set_track_update()
968 p->when = jiffies; in set_track_update()
983 if (!(s->flags & SLAB_STORE_USER)) in init_tracking()
994 if (!t->addr) in print_track()
998 s, (void *)t->addr, pr_time - t->when, t->cpu, t->pid); in print_track()
1000 handle = READ_ONCE(t->handle); in print_track()
1011 if (!(s->flags & SLAB_STORE_USER)) in print_tracking()
1021 slab, slab->objects, slab->inuse, slab->freelist, in print_slab_info()
1022 &slab->__page_flags); in print_slab_info()
1027 set_orig_size(s, (void *)object, s->object_size); in skip_orig_size_check()
1039 pr_err("BUG %s (%s): %pV\n", s->name, print_tainted(), &vaf); in slab_bug()
1040 pr_err("-----------------------------------------------------------------------------\n\n"); in slab_bug()
1056 pr_err("FIX %s: %pV\n", s->name, &vaf); in slab_fix()
1070 p, p - addr, get_freepointer(s, p)); in print_trailer()
1072 if (s->flags & SLAB_RED_ZONE) in print_trailer()
1073 print_section(KERN_ERR, "Redzone ", p - s->red_left_pad, in print_trailer()
1074 s->red_left_pad); in print_trailer()
1076 print_section(KERN_ERR, "Bytes b4 ", p - 16, 16); in print_trailer()
1079 min_t(unsigned int, s->object_size, PAGE_SIZE)); in print_trailer()
1080 if (s->flags & SLAB_RED_ZONE) in print_trailer()
1081 print_section(KERN_ERR, "Redzone ", p + s->object_size, in print_trailer()
1082 s->inuse - s->object_size); in print_trailer()
1086 if (s->flags & SLAB_STORE_USER) in print_trailer()
1097 size_from_object(s) - off); in print_trailer()
1116 if ((s->flags & SLAB_CONSISTENCY_CHECKS) && in freelist_corrupted()
1148 unsigned int poison_size = s->object_size; in init_object()
1150 if (s->flags & SLAB_RED_ZONE) { in init_object()
1153 * the shadow makes it possible to distinguish uninit-value in init_object()
1154 * from use-after-free. in init_object()
1156 memset_no_sanitize_memory(p - s->red_left_pad, val, in init_object()
1157 s->red_left_pad); in init_object()
1169 if (s->flags & __OBJECT_POISON) { in init_object()
1170 memset_no_sanitize_memory(p, POISON_FREE, poison_size - 1); in init_object()
1171 memset_no_sanitize_memory(p + poison_size - 1, POISON_END, 1); in init_object()
1174 if (s->flags & SLAB_RED_ZONE) in init_object()
1176 s->inuse - poison_size); in init_object()
1182 slab_fix(s, "Restoring %s 0x%p-0x%p=0x%x", message, from, to - 1, data); in restore_bytes()
1183 memset(from, data, to - from); in restore_bytes()
1208 while (end > fault && end[-1] == value) in check_bytes_and_report()
1209 end--; in check_bytes_and_report()
1215 pr_err("0x%p-0x%p @offset=%tu. First byte 0x%x instead of 0x%x\n", in check_bytes_and_report()
1216 fault, end - 1, fault - addr, in check_bytes_and_report()
1235 * object + s->object_size
1243 * object + s->inuse
1255 * object + s->size
1256 * Nothing is used beyond s->size.
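
The layout comment above (object + s->object_size, object + s->inuse, object + s->size), together with the red-zone printing fragments earlier, implies the per-object stride used when debugging is enabled. A sketch that prints that layout for made-up cache parameters, with offsets measured from the start of one s->size stride (the object pointer itself sits at offset red_left_pad):

        #include <stdio.h>

        int main(void)
        {
                /* Assumed values, not taken from a real cache. */
                unsigned int red_left_pad = 8;
                unsigned int object_size  = 40;
                unsigned int inuse        = 48;   /* object data + right redzone */
                unsigned int size         = 96;   /* full stride, includes left pad */

                printf("left redzone : [%u, %u)\n", 0u, red_left_pad);
                printf("object data  : [%u, %u)\n", red_left_pad, red_left_pad + object_size);
                printf("right redzone: [%u, %u)\n", red_left_pad + object_size,
                       red_left_pad + inuse);
                printf("metadata/pad : [%u, %u)\n", red_left_pad + inuse, size);
                return 0;
        }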
1267 if (s->flags & SLAB_STORE_USER) { in check_pad_bytes()
1271 if (s->flags & SLAB_KMALLOC) in check_pad_bytes()
1281 p + off, POISON_INUSE, size_from_object(s) - off); in check_pad_bytes()
1295 if (!(s->flags & SLAB_POISON)) in slab_pad_check()
1301 remainder = length % s->size; in slab_pad_check()
1305 pad = end - remainder; in slab_pad_check()
1311 while (end > fault && end[-1] == POISON_INUSE) in slab_pad_check()
1312 end--; in slab_pad_check()
1314 slab_err(s, slab, "Padding overwritten. 0x%p-0x%p @offset=%tu", in slab_pad_check()
1315 fault, end - 1, fault - start); in slab_pad_check()
1325 u8 *endobject = object + s->object_size; in check_object()
1329 if (s->flags & SLAB_RED_ZONE) { in check_object()
1331 object - s->red_left_pad, val, s->red_left_pad)) in check_object()
1335 endobject, val, s->inuse - s->object_size)) in check_object()
1341 if (s->object_size > orig_size && in check_object()
1344 val, s->object_size - orig_size)) { in check_object()
1349 if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) { in check_object()
1352 s->inuse - s->object_size)) in check_object()
1357 if (s->flags & SLAB_POISON) { in check_object()
1358 if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON)) { in check_object()
1365 if (kasan_meta_size < s->object_size - 1 && in check_object()
1368 s->object_size - kasan_meta_size - 1)) in check_object()
1370 if (kasan_meta_size < s->object_size && in check_object()
1372 p + s->object_size - 1, POISON_END, 1)) in check_object()
1415 maxobj = order_objects(slab_order(slab), s->size); in check_slab()
1416 if (slab->objects > maxobj) { in check_slab()
1418 slab->objects, maxobj); in check_slab()
1421 if (slab->inuse > slab->objects) { in check_slab()
1423 slab->inuse, slab->objects); in check_slab()
1442 fp = slab->freelist; in on_freelist()
1443 while (fp && nr <= slab->objects) { in on_freelist()
1453 slab->freelist = NULL; in on_freelist()
1454 slab->inuse = slab->objects; in on_freelist()
1465 max_objects = order_objects(slab_order(slab), s->size); in on_freelist()
1469 if (slab->objects != max_objects) { in on_freelist()
1471 slab->objects, max_objects); in on_freelist()
1472 slab->objects = max_objects; in on_freelist()
1475 if (slab->inuse != slab->objects - nr) { in on_freelist()
1477 slab->inuse, slab->objects - nr); in on_freelist()
1478 slab->inuse = slab->objects - nr; in on_freelist()
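
on_freelist() above repairs an inconsistent slab by walking its freelist, counting the free objects, and recomputing inuse as the object count minus that number. A toy userspace version of the same walk:

        #include <stdio.h>

        struct obj { struct obj *next; };

        int main(void)
        {
                struct obj objs[4];
                struct obj *freelist, *p;
                unsigned int nr = 0, objects = 4, inuse;

                /* Build a toy freelist holding two of the four objects. */
                objs[1].next = &objs[3];
                objs[3].next = NULL;
                freelist = &objs[1];

                for (p = freelist; p && nr <= objects; p = p->next)
                        nr++;

                inuse = objects - nr;
                printf("free objects: %u, inuse: %u\n", nr, inuse);  /* 2, 2 */
                return 0;
        }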
1487 if (s->flags & SLAB_TRACE) { in trace()
1489 s->name, in trace()
1491 object, slab->inuse, in trace()
1492 slab->freelist); in trace()
1496 s->object_size); in trace()
1508 if (!(s->flags & SLAB_STORE_USER)) in add_full()
1511 lockdep_assert_held(&n->list_lock); in add_full()
1512 list_add(&slab->slab_list, &n->full); in add_full()
1517 if (!(s->flags & SLAB_STORE_USER)) in remove_full()
1520 lockdep_assert_held(&n->list_lock); in remove_full()
1521 list_del(&slab->slab_list); in remove_full()
1526 return atomic_long_read(&n->nr_slabs); in node_nr_slabs()
1533 atomic_long_inc(&n->nr_slabs); in inc_slabs_node()
1534 atomic_long_add(objects, &n->total_objects); in inc_slabs_node()
1540 atomic_long_dec(&n->nr_slabs); in dec_slabs_node()
1541 atomic_long_sub(objects, &n->total_objects); in dec_slabs_node()
1585 if (s->flags & SLAB_CONSISTENCY_CHECKS) { in alloc_debug_processing()
1604 slab->inuse = slab->objects; in alloc_debug_processing()
1605 slab->freelist = NULL; in alloc_debug_processing()
1626 if (unlikely(s != slab->slab_cache)) { in free_consistency_checks()
1630 } else if (!slab->slab_cache) { in free_consistency_checks()
1648 * @init: assume this is initial parsing and not per-kmem-create parsing
1674 case '-': in parse_slub_debug_flags()
1783 pr_info("mem auto-init: SLAB_POISON will take precedence over init_on_alloc/init_on_free\n"); in setup_slub_debug()
1791 * kmem_cache_flags - apply debugging options to the cache
1797 * slab_debug=<Debug-Options>,<slab name1>,<slab name2> ...
1833 end = next_block - 1; in kmem_cache_flags()
1835 glob = strnchr(iter, end - iter, '*'); in kmem_cache_flags()
1837 cmplen = glob - iter; in kmem_cache_flags()
1839 cmplen = max_t(size_t, len, (end - iter)); in kmem_cache_flags()
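
kmem_cache_flags() above matches cache names against the comma-separated blocks of the slab_debug= parameter, treating a trailing '*' as a prefix wildcard. A userspace approximation of that match (simplified: the kernel compares bounded, non-NUL-terminated tokens rather than C strings):

        #include <stdbool.h>
        #include <stdio.h>
        #include <string.h>

        static bool cache_name_matches(const char *pattern, const char *cache)
        {
                const char *glob = strchr(pattern, '*');

                if (glob)       /* prefix match up to the '*' */
                        return strncmp(pattern, cache, glob - pattern) == 0;
                return strcmp(pattern, cache) == 0;
        }

        int main(void)
        {
                printf("%d\n", cache_name_matches("dentry", "dentry"));         /* 1 */
                printf("%d\n", cache_name_matches("kmalloc-*", "kmalloc-64"));  /* 1 */
                printf("%d\n", cache_name_matches("kmalloc-*", "dentry"));      /* 0 */
                return 0;
        }

A command line such as slab_debug=FZ,kmalloc-* would then enable sanity checks and red zoning for every kmalloc-* cache, assuming the flag letters documented for SLUB; the letters themselves are not shown in the fragments above.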
1911 unsigned int offs = obj_to_index(obj_exts_slab->slab_cache, in mark_objexts_empty()
1921 slab->obj_exts = OBJEXTS_ALLOC_FAIL; in mark_failed_objexts_alloc()
1975 return -ENOMEM; in alloc_slab_obj_exts()
1982 old_exts = READ_ONCE(slab->obj_exts); in alloc_slab_obj_exts()
1990 slab->obj_exts = new_exts; in alloc_slab_obj_exts()
1992 cmpxchg(&slab->obj_exts, old_exts, new_exts) != old_exts) { in alloc_slab_obj_exts()
2024 slab->obj_exts = 0; in free_slab_obj_exts()
2068 if (s->flags & (SLAB_NO_OBJ_EXT | SLAB_NOLEAKTRACE)) in prepare_slab_obj_exts_hook()
2078 __func__, s->name)) in prepare_slab_obj_exts_hook()
2097 alloc_tag_add(&obj_exts->ref, current->alloc_tag, s->size); in alloc_tagging_slab_alloc_hook()
2111 /* slab->obj_exts might not be NULL if it was created for MEMCG accounting. */ in alloc_tagging_slab_free_hook()
2112 if (s->flags & (SLAB_NO_OBJ_EXT | SLAB_NOLEAKTRACE)) in alloc_tagging_slab_free_hook()
2122 alloc_tag_sub(&obj_exts[off].ref, s->size); in alloc_tagging_slab_free_hook()
2153 if (likely(!(flags & __GFP_ACCOUNT) && !(s->flags & SLAB_ACCOUNT))) in memcg_slab_post_alloc_hook()
2202 s = slab->slab_cache; in memcg_slab_post_charge()
2265 bool still_accessible = (s->flags & SLAB_TYPESAFE_BY_RCU) && !after_rcu_delay; in slab_free_hook()
2267 kmemleak_free_recursive(x, s->flags); in slab_free_hook()
2270 debug_check_no_locks_freed(x, s->object_size); in slab_free_hook()
2272 if (!(s->flags & SLAB_DEBUG_OBJECTS)) in slab_free_hook()
2273 debug_check_no_obj_freed(x, s->object_size); in slab_free_hook()
2275 /* Use KCSAN to help debug racy use-after-free. */ in slab_free_hook()
2277 __kcsan_check_access(x, s->object_size, in slab_free_hook()
2305 delayed_free->object = x; in slab_free_hook()
2306 call_rcu(&delayed_free->head, slab_free_after_rcu_debug); in slab_free_hook()
2331 rsize = (s->flags & SLAB_RED_ZONE) ? s->red_left_pad : 0; in slab_free_hook()
2333 s->size - inuse - rsize); in slab_free_hook()
2382 --(*cnt); in slab_free_freelist_hook()
2393 if (unlikely(s->ctor)) { in setup_object()
2395 s->ctor(object); in setup_object()
2421 /* Make the flag visible before any changes to folio->mapping */ in alloc_slab_page()
2430 /* Pre-initialize the random sequence cache */
2433 unsigned int count = oo_objects(s->oo); in init_cache_random_seq()
2437 if (s->random_seq) in init_cache_random_seq()
2443 s->name); in init_cache_random_seq()
2448 if (s->random_seq) { in init_cache_random_seq()
2452 s->random_seq[i] *= s->size; in init_cache_random_seq()
2470 /* Get the next entry on the pre-computed freelist randomized */
2483 idx = s->random_seq[*pos]; in next_freelist_entry()
2492 /* Shuffle the single linked freelist based on a random pre-computed sequence */
2500 if (slab->objects < 2 || !s->random_seq) in shuffle_freelist()
2503 freelist_count = oo_objects(s->oo); in shuffle_freelist()
2506 page_limit = slab->objects * s->size; in shuffle_freelist()
2512 slab->freelist = cur; in shuffle_freelist()
2514 for (idx = 1; idx < slab->objects; idx++) { in shuffle_freelist()
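
shuffle_freelist() above links a new slab's objects in the order given by a per-cache random sequence whose entries were pre-scaled by the object size (see init_cache_random_seq()). A self-contained sketch of that linking step using a hard-coded toy permutation:

        #include <stdio.h>

        #define NR_OBJS 4
        #define OBJ_SZ  32

        struct obj { struct obj *next; };
        union slot { struct obj o; unsigned char pad[OBJ_SZ]; };

        int main(void)
        {
                union slot slab[NR_OBJS];
                unsigned char *base = (unsigned char *)slab;
                /* Pretend random_seq[] already holds a permutation of object
                 * offsets, pre-scaled by the object size. */
                unsigned int random_seq[NR_OBJS] = { 2 * OBJ_SZ, 0, 3 * OBJ_SZ, 1 * OBJ_SZ };
                struct obj *freelist, *cur, *next;
                unsigned int idx;

                cur = (struct obj *)(base + random_seq[0]);
                freelist = cur;
                for (idx = 1; idx < NR_OBJS; idx++) {
                        next = (struct obj *)(base + random_seq[idx]);
                        cur->next = next;
                        cur = next;
                }
                cur->next = NULL;

                for (cur = freelist; cur; cur = cur->next)
                        printf("object at offset %td\n", (unsigned char *)cur - base);
                return 0;
        }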
2540 if (memcg_kmem_online() && (s->flags & SLAB_ACCOUNT)) in account_slab()
2554 -(PAGE_SIZE << order)); in unaccount_slab()
2560 struct kmem_cache_order_objects oo = s->oo; in allocate_slab()
2568 flags |= s->allocflags; in allocate_slab()
2571 * Let the initial higher-order allocation fail under memory pressure in allocate_slab()
2572 * so we fall-back to the minimum order allocation. in allocate_slab()
2575 if ((alloc_gfp & __GFP_DIRECT_RECLAIM) && oo_order(oo) > oo_order(s->min)) in allocate_slab()
2580 oo = s->min; in allocate_slab()
2592 slab->objects = oo_objects(oo); in allocate_slab()
2593 slab->inuse = 0; in allocate_slab()
2594 slab->frozen = 0; in allocate_slab()
2598 slab->slab_cache = s; in allocate_slab()
2611 slab->freelist = start; in allocate_slab()
2612 for (idx = 0, p = start; idx < slab->objects - 1; idx++) { in allocate_slab()
2613 next = p + s->size; in allocate_slab()
2629 WARN_ON_ONCE(s->ctor && (flags & __GFP_ZERO)); in new_slab()
2642 folio->mapping = NULL; in __free_slab()
2648 __free_pages(&folio->page, order); in __free_slab()
2655 __free_slab(slab->slab_cache, slab); in rcu_free_slab()
2664 for_each_object(p, s, slab_address(slab), slab->objects) in free_slab()
2668 if (unlikely(s->flags & SLAB_TYPESAFE_BY_RCU)) in free_slab()
2669 call_rcu(&slab->rcu_head, rcu_free_slab); in free_slab()
2676 dec_slabs_node(s, slab_nid(slab), slab->objects); in discard_slab()
2682 * the per-node partial list.
2705 n->nr_partial++; in __add_partial()
2707 list_add_tail(&slab->slab_list, &n->partial); in __add_partial()
2709 list_add(&slab->slab_list, &n->partial); in __add_partial()
2716 lockdep_assert_held(&n->list_lock); in add_partial()
2723 lockdep_assert_held(&n->list_lock); in remove_partial()
2724 list_del(&slab->slab_list); in remove_partial()
2726 n->nr_partial--; in remove_partial()
2731 * slab from the n->partial list. Remove only a single object from the slab, do
2740 lockdep_assert_held(&n->list_lock); in alloc_single_from_partial()
2742 object = slab->freelist; in alloc_single_from_partial()
2743 slab->freelist = get_freepointer(s, object); in alloc_single_from_partial()
2744 slab->inuse++; in alloc_single_from_partial()
2751 if (slab->inuse == slab->objects) { in alloc_single_from_partial()
2773 object = slab->freelist; in alloc_single_from_new_slab()
2774 slab->freelist = get_freepointer(s, object); in alloc_single_from_new_slab()
2775 slab->inuse = 1; in alloc_single_from_new_slab()
2785 spin_lock_irqsave(&n->list_lock, flags); in alloc_single_from_new_slab()
2787 if (slab->inuse == slab->objects) in alloc_single_from_new_slab()
2792 inc_slabs_node(s, nid, slab->objects); in alloc_single_from_new_slab()
2793 spin_unlock_irqrestore(&n->list_lock, flags); in alloc_single_from_new_slab()
2823 if (!n || !n->nr_partial) in get_partial_node()
2826 spin_lock_irqsave(&n->list_lock, flags); in get_partial_node()
2827 list_for_each_entry_safe(slab, slab2, &n->partial, slab_list) { in get_partial_node()
2828 if (!pfmemalloc_match(slab, pc->flags)) in get_partial_node()
2833 pc->orig_size); in get_partial_node()
2836 pc->object = object; in get_partial_node()
2860 spin_unlock_irqrestore(&n->list_lock, flags); in get_partial_node()
2874 enum zone_type highest_zoneidx = gfp_zone(pc->flags); in get_any_partial()
2896 if (!s->remote_node_defrag_ratio || in get_any_partial()
2897 get_cycles() % 1024 > s->remote_node_defrag_ratio) in get_any_partial()
2902 zonelist = node_zonelist(mempolicy_slab_node(), pc->flags); in get_any_partial()
2908 if (n && cpuset_zone_allowed(zone, pc->flags) && in get_any_partial()
2909 n->nr_partial > s->min_partial) { in get_any_partial()
2914 * here - if mems_allowed was updated in in get_any_partial()
2941 if (slab || (node != NUMA_NO_NODE && (pc->flags & __GFP_THISNODE))) in get_partial()
2990 unsigned long actual_tid = __this_cpu_read(s->cpu_slab->tid); in note_cmpxchg_failure()
2992 pr_info("%s %s: cmpxchg redo ", n, s->name); in note_cmpxchg_failure()
2996 pr_warn("due to cpu change %d -> %d\n", in note_cmpxchg_failure()
3001 pr_warn("due to cpu running other code. Event %ld->%ld\n", in note_cmpxchg_failure()
3016 c = per_cpu_ptr(s->cpu_slab, cpu); in init_kmem_cache_cpus()
3017 local_lock_init(&c->lock); in init_kmem_cache_cpus()
3018 c->tid = init_tid(cpu); in init_kmem_cache_cpus()
3039 if (READ_ONCE(slab->freelist)) { in deactivate_slab()
3068 * Stage two: Unfreeze the slab while splicing the per-cpu in deactivate_slab()
3072 old.freelist = READ_ONCE(slab->freelist); in deactivate_slab()
3073 old.counters = READ_ONCE(slab->counters); in deactivate_slab()
3080 new.inuse -= free_delta; in deactivate_slab()
3094 if (!new.inuse && n->nr_partial >= s->min_partial) { in deactivate_slab()
3099 spin_lock_irqsave(&n->list_lock, flags); in deactivate_slab()
3101 spin_unlock_irqrestore(&n->list_lock, flags); in deactivate_slab()
3117 partial_slab = slab->next; in __put_partials()
3122 spin_unlock_irqrestore(&n->list_lock, flags); in __put_partials()
3125 spin_lock_irqsave(&n->list_lock, flags); in __put_partials()
3128 if (unlikely(!slab->inuse && n->nr_partial >= s->min_partial)) { in __put_partials()
3129 slab->next = slab_to_discard; in __put_partials()
3138 spin_unlock_irqrestore(&n->list_lock, flags); in __put_partials()
3142 slab_to_discard = slab_to_discard->next; in __put_partials()
3158 local_lock_irqsave(&s->cpu_slab->lock, flags); in put_partials()
3159 partial_slab = this_cpu_read(s->cpu_slab->partial); in put_partials()
3160 this_cpu_write(s->cpu_slab->partial, NULL); in put_partials()
3161 local_unlock_irqrestore(&s->cpu_slab->lock, flags); in put_partials()
3173 c->partial = NULL; in put_partials_cpu()
3192 local_lock_irqsave(&s->cpu_slab->lock, flags); in put_cpu_partial()
3194 oldslab = this_cpu_read(s->cpu_slab->partial); in put_cpu_partial()
3197 if (drain && oldslab->slabs >= s->cpu_partial_slabs) { in put_cpu_partial()
3206 slabs = oldslab->slabs; in put_cpu_partial()
3212 slab->slabs = slabs; in put_cpu_partial()
3213 slab->next = oldslab; in put_cpu_partial()
3215 this_cpu_write(s->cpu_slab->partial, slab); in put_cpu_partial()
3217 local_unlock_irqrestore(&s->cpu_slab->lock, flags); in put_cpu_partial()
3239 local_lock_irqsave(&s->cpu_slab->lock, flags); in flush_slab()
3241 slab = c->slab; in flush_slab()
3242 freelist = c->freelist; in flush_slab()
3244 c->slab = NULL; in flush_slab()
3245 c->freelist = NULL; in flush_slab()
3246 c->tid = next_tid(c->tid); in flush_slab()
3248 local_unlock_irqrestore(&s->cpu_slab->lock, flags); in flush_slab()
3258 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu); in __flush_cpu_slab()
3259 void *freelist = c->freelist; in __flush_cpu_slab()
3260 struct slab *slab = c->slab; in __flush_cpu_slab()
3262 c->slab = NULL; in __flush_cpu_slab()
3263 c->freelist = NULL; in __flush_cpu_slab()
3264 c->tid = next_tid(c->tid); in __flush_cpu_slab()
3293 s = sfw->s; in flush_cpu_slab()
3294 c = this_cpu_ptr(s->cpu_slab); in flush_cpu_slab()
3296 if (c->slab) in flush_cpu_slab()
3304 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu); in has_cpu_slab()
3306 return c->slab || slub_percpu_partial(c); in has_cpu_slab()
3323 sfw->skip = true; in flush_all_cpus_locked()
3326 INIT_WORK(&sfw->work, flush_cpu_slab); in flush_all_cpus_locked()
3327 sfw->skip = false; in flush_all_cpus_locked()
3328 sfw->s = s; in flush_all_cpus_locked()
3329 queue_work_on(cpu, flushwq, &sfw->work); in flush_all_cpus_locked()
3334 if (sfw->skip) in flush_all_cpus_locked()
3336 flush_work(&sfw->work); in flush_all_cpus_locked()
3387 return slab->objects - slab->inuse; in count_free()
3392 return atomic_long_read(&n->total_objects); in node_nr_objs()
3404 if (s->flags & SLAB_CONSISTENCY_CHECKS) { in free_debug_processing()
3409 if (slab->inuse < *bulk_cnt) { in free_debug_processing()
3411 slab->inuse, *bulk_cnt); in free_debug_processing()
3420 if (s->flags & SLAB_CONSISTENCY_CHECKS) { in free_debug_processing()
3425 if (s->flags & SLAB_STORE_USER) in free_debug_processing()
3462 spin_lock_irqsave(&n->list_lock, flags); in count_partial()
3463 list_for_each_entry(slab, &n->partial, slab_list) in count_partial()
3465 spin_unlock_irqrestore(&n->list_lock, flags); in count_partial()
3479 spin_lock_irqsave(&n->list_lock, flags); in count_partial_free_approx()
3480 if (n->nr_partial <= MAX_PARTIAL_TO_SCAN) { in count_partial_free_approx()
3481 list_for_each_entry(slab, &n->partial, slab_list) in count_partial_free_approx()
3482 x += slab->objects - slab->inuse; in count_partial_free_approx()
3491 list_for_each_entry(slab, &n->partial, slab_list) { in count_partial_free_approx()
3492 x += slab->objects - slab->inuse; in count_partial_free_approx()
3496 list_for_each_entry_reverse(slab, &n->partial, slab_list) { in count_partial_free_approx()
3497 x += slab->objects - slab->inuse; in count_partial_free_approx()
3501 x = mult_frac(x, n->nr_partial, scanned); in count_partial_free_approx()
3504 spin_unlock_irqrestore(&n->list_lock, flags); in count_partial_free_approx()
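
count_partial_free_approx() above scans only a bounded number of partial slabs from both ends of the list and extrapolates the total: the free objects it saw, scaled by nr_partial / scanned (the kernel uses mult_frac() to avoid intermediate overflow). A worked example with made-up numbers:

        #include <stdio.h>

        int main(void)
        {
                unsigned long x = 180;          /* free objects seen while scanning */
                unsigned long scanned = 64;     /* partial slabs actually examined */
                unsigned long nr_partial = 1000;

                printf("estimated free objects: %lu\n", x * nr_partial / scanned);
                return 0;
        }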
3523 s->name, s->object_size, s->size, oo_order(s->oo), in slab_out_of_memory()
3524 oo_order(s->min)); in slab_out_of_memory()
3526 if (oo_order(s->min) > get_order(s->object_size)) in slab_out_of_memory()
3528 s->name); in slab_out_of_memory()
3565 return this_cpu_try_cmpxchg_freelist(s->cpu_slab->freelist_tid.full, in __update_cpu_freelist_fast()
3570 * Check the slab->freelist and either transfer the freelist to the
3583 lockdep_assert_held(this_cpu_ptr(&s->cpu_slab->lock)); in get_freelist()
3586 freelist = slab->freelist; in get_freelist()
3587 counters = slab->counters; in get_freelist()
3591 new.inuse = slab->objects; in get_freelist()
3612 freelist = slab->freelist; in freeze_slab()
3613 counters = slab->counters; in freeze_slab()
3618 new.inuse = slab->objects; in freeze_slab()
3661 slab = READ_ONCE(c->slab); in ___slab_alloc()
3689 * information when the page leaves the per-cpu allocator in ___slab_alloc()
3694 /* must check again c->slab in case we got preempted and it changed */ in ___slab_alloc()
3695 local_lock_irqsave(&s->cpu_slab->lock, flags); in ___slab_alloc()
3696 if (unlikely(slab != c->slab)) { in ___slab_alloc()
3697 local_unlock_irqrestore(&s->cpu_slab->lock, flags); in ___slab_alloc()
3700 freelist = c->freelist; in ___slab_alloc()
3707 c->slab = NULL; in ___slab_alloc()
3708 c->tid = next_tid(c->tid); in ___slab_alloc()
3709 local_unlock_irqrestore(&s->cpu_slab->lock, flags); in ___slab_alloc()
3718 lockdep_assert_held(this_cpu_ptr(&s->cpu_slab->lock)); in ___slab_alloc()
3725 VM_BUG_ON(!c->slab->frozen); in ___slab_alloc()
3726 c->freelist = get_freepointer(s, freelist); in ___slab_alloc()
3727 c->tid = next_tid(c->tid); in ___slab_alloc()
3728 local_unlock_irqrestore(&s->cpu_slab->lock, flags); in ___slab_alloc()
3733 local_lock_irqsave(&s->cpu_slab->lock, flags); in ___slab_alloc()
3734 if (slab != c->slab) { in ___slab_alloc()
3735 local_unlock_irqrestore(&s->cpu_slab->lock, flags); in ___slab_alloc()
3738 freelist = c->freelist; in ___slab_alloc()
3739 c->slab = NULL; in ___slab_alloc()
3740 c->freelist = NULL; in ___slab_alloc()
3741 c->tid = next_tid(c->tid); in ___slab_alloc()
3742 local_unlock_irqrestore(&s->cpu_slab->lock, flags); in ___slab_alloc()
3749 local_lock_irqsave(&s->cpu_slab->lock, flags); in ___slab_alloc()
3750 if (unlikely(c->slab)) { in ___slab_alloc()
3751 local_unlock_irqrestore(&s->cpu_slab->lock, flags); in ___slab_alloc()
3755 local_unlock_irqrestore(&s->cpu_slab->lock, flags); in ___slab_alloc()
3765 c->slab = slab; in ___slab_alloc()
3772 local_unlock_irqrestore(&s->cpu_slab->lock, flags); in ___slab_alloc()
3774 slab->next = NULL; in ___slab_alloc()
3807 if (s->flags & SLAB_STORE_USER) in ___slab_alloc()
3817 slub_put_cpu_ptr(s->cpu_slab); in ___slab_alloc()
3819 c = slub_get_cpu_ptr(s->cpu_slab); in ___slab_alloc()
3839 if (s->flags & SLAB_STORE_USER) in ___slab_alloc()
3849 freelist = slab->freelist; in ___slab_alloc()
3850 slab->freelist = NULL; in ___slab_alloc()
3851 slab->inuse = slab->objects; in ___slab_alloc()
3852 slab->frozen = 1; in ___slab_alloc()
3854 inc_slabs_node(s, slab_nid(slab), slab->objects); in ___slab_alloc()
3867 local_lock_irqsave(&s->cpu_slab->lock, flags); in ___slab_alloc()
3868 if (unlikely(c->slab)) { in ___slab_alloc()
3869 void *flush_freelist = c->freelist; in ___slab_alloc()
3870 struct slab *flush_slab = c->slab; in ___slab_alloc()
3872 c->slab = NULL; in ___slab_alloc()
3873 c->freelist = NULL; in ___slab_alloc()
3874 c->tid = next_tid(c->tid); in ___slab_alloc()
3876 local_unlock_irqrestore(&s->cpu_slab->lock, flags); in ___slab_alloc()
3884 c->slab = slab; in ___slab_alloc()
3905 c = slub_get_cpu_ptr(s->cpu_slab); in __slab_alloc()
3910 slub_put_cpu_ptr(s->cpu_slab); in __slab_alloc()
3936 c = raw_cpu_ptr(s->cpu_slab); in __slab_alloc_node()
3937 tid = READ_ONCE(c->tid); in __slab_alloc_node()
3940 * Irqless object alloc/free algorithm used here depends on sequence in __slab_alloc_node()
3956 object = c->freelist; in __slab_alloc_node()
3957 slab = c->slab; in __slab_alloc_node()
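
__slab_alloc_node() above is the lockless fast path: it reads the per-cpu tid and freelist head, then commits the allocation only if a combined compare-and-swap of the {freelist, tid} pair still sees both unchanged (see the this_cpu_try_cmpxchg of freelist_tid earlier). A greatly simplified userspace model that packs a toy object index and the tid into one 64-bit atomic word; the kernel instead operates on a real pointer plus tid with a per-cpu double-word cmpxchg:

        #include <stdatomic.h>
        #include <stdint.h>
        #include <stdio.h>

        #define PACK(head, tid) (((uint64_t)(head) << 32) | (uint32_t)(tid))
        #define HEAD(v)         ((uint32_t)((v) >> 32))
        #define TID(v)          ((uint32_t)(v))
        #define END_OF_LIST     0xffffffffu

        /* Toy freelist: object i's successor is next_of[i]. */
        static uint32_t next_of[] = { 1, 2, 3, 4, END_OF_LIST };
        static _Atomic uint64_t freelist_tid = PACK(0, 0);   /* head = object 0, tid = 0 */

        static uint32_t alloc_fastpath(void)
        {
                uint64_t old, new;

                do {
                        old = atomic_load(&freelist_tid);
                        if (HEAD(old) == END_OF_LIST)
                                return END_OF_LIST;     /* empty: would take the slow path */
                        /* Advance the head and bump the tid in one shot. */
                        new = PACK(next_of[HEAD(old)], TID(old) + 1);
                } while (!atomic_compare_exchange_weak(&freelist_tid, &old, new));

                return HEAD(old);
        }

        int main(void)
        {
                for (int i = 0; i < 6; i++)
                        printf("allocated object %#x\n", alloc_fastpath());
                return 0;
        }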
4027 memset((void *)((char *)kasan_reset_tag(obj) + s->offset), in maybe_wipe_obj_freeptr()
4049 unsigned int zero_size = s->object_size; in slab_post_alloc_hook()
4063 (s->flags & SLAB_KMALLOC)) in slab_post_alloc_hook()
4070 * cause false-positive reports. This does not lead to a performance in slab_post_alloc_hook()
4089 kmemleak_alloc_recursive(p[i], s->object_size, 1, in slab_post_alloc_hook()
4090 s->flags, init_flags); in slab_post_alloc_hook()
4130 * @orig_size bytes might be zeroed instead of s->object_size in slab_alloc_node()
4142 s->object_size); in kmem_cache_alloc_noprof()
4154 s->object_size); in kmem_cache_alloc_lru_noprof()
4172 * kmem_cache_alloc_node - Allocate an object on the specified node
4186 void *ret = slab_alloc_node(s, NULL, gfpflags, node, _RET_IP_, s->object_size); in kmem_cache_alloc_node_noprof()
4265 trace_kmalloc(caller, ret, size, s->size, flags, node); in __do_kmalloc_node()
4293 trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags, NUMA_NO_NODE); in __kmalloc_cache_noprof()
4305 trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags, node); in __kmalloc_cache_node_noprof()
4323 if (s->flags & SLAB_STORE_USER) in free_to_partial_list()
4326 spin_lock_irqsave(&n->list_lock, flags); in free_to_partial_list()
4329 void *prior = slab->freelist; in free_to_partial_list()
4332 slab->inuse -= cnt; in free_to_partial_list()
4334 slab->freelist = head; in free_to_partial_list()
4341 if (slab->inuse == 0 && n->nr_partial >= s->min_partial) in free_to_partial_list()
4359 * Update the counters while still holding n->list_lock to in free_to_partial_list()
4362 dec_slabs_node(s, slab_nid(slab_free), slab_free->objects); in free_to_partial_list()
4365 spin_unlock_irqrestore(&n->list_lock, flags); in free_to_partial_list()
4403 spin_unlock_irqrestore(&n->list_lock, flags); in __slab_free()
4406 prior = slab->freelist; in __slab_free()
4407 counters = slab->counters; in __slab_free()
4411 new.inuse -= cnt; in __slab_free()
4425 spin_lock_irqsave(&n->list_lock, flags); in __slab_free()
4457 * This slab was partially empty but not on the per-node partial list, in __slab_free()
4461 spin_unlock_irqrestore(&n->list_lock, flags); in __slab_free()
4465 if (unlikely(!new.inuse && n->nr_partial >= s->min_partial)) in __slab_free()
4476 spin_unlock_irqrestore(&n->list_lock, flags); in __slab_free()
4488 spin_unlock_irqrestore(&n->list_lock, flags); in __slab_free()
4524 c = raw_cpu_ptr(s->cpu_slab); in do_slab_free()
4525 tid = READ_ONCE(c->tid); in do_slab_free()
4530 if (unlikely(slab != c->slab)) { in do_slab_free()
4536 freelist = READ_ONCE(c->freelist); in do_slab_free()
4546 local_lock(&s->cpu_slab->lock); in do_slab_free()
4547 c = this_cpu_ptr(s->cpu_slab); in do_slab_free()
4548 if (unlikely(slab != c->slab)) { in do_slab_free()
4549 local_unlock(&s->cpu_slab->lock); in do_slab_free()
4552 tid = c->tid; in do_slab_free()
4553 freelist = c->freelist; in do_slab_free()
4556 c->freelist = head; in do_slab_free()
4557 c->tid = next_tid(tid); in do_slab_free()
4559 local_unlock(&s->cpu_slab->lock); in do_slab_free()
4584 /* Do not inline the rare memcg charging failed path into the allocation path */
4612 void *object = delayed_free->object; in slab_free_after_rcu_debug()
4624 s = slab->slab_cache; in slab_free_after_rcu_debug()
4625 if (WARN_ON(!(s->flags & SLAB_TYPESAFE_BY_RCU))) in slab_free_after_rcu_debug()
4648 return slab->slab_cache; in virt_to_cache()
4662 __func__, s->name, cachep->name)) in cache_from_obj()
4668 * kmem_cache_free - Deallocate an object
4697 -(PAGE_SIZE << order)); in free_large_kmalloc()
4702 * kfree - free previously allocated memory
4726 s = slab->slab_cache; in kfree()
4760 object = p[--size]; in build_detached_freelist()
4766 df->slab = NULL; in build_detached_freelist()
4770 df->slab = folio_slab(folio); in build_detached_freelist()
4771 df->s = df->slab->slab_cache; in build_detached_freelist()
4773 df->slab = folio_slab(folio); in build_detached_freelist()
4774 df->s = cache_from_obj(s, object); /* Support for memcg */ in build_detached_freelist()
4778 df->tail = object; in build_detached_freelist()
4779 df->freelist = object; in build_detached_freelist()
4780 df->cnt = 1; in build_detached_freelist()
4785 set_freepointer(df->s, object, NULL); in build_detached_freelist()
4789 object = p[--size]; in build_detached_freelist()
4790 /* df->slab is always set at this point */ in build_detached_freelist()
4791 if (df->slab == virt_to_slab(object)) { in build_detached_freelist()
4793 set_freepointer(df->s, object, df->freelist); in build_detached_freelist()
4794 df->freelist = object; in build_detached_freelist()
4795 df->cnt++; in build_detached_freelist()
4796 same--; in build_detached_freelist()
4803 if (!--lookahead) in build_detached_freelist()
4867 c = slub_get_cpu_ptr(s->cpu_slab); in __kmem_cache_alloc_bulk()
4868 local_lock_irqsave(&s->cpu_slab->lock, irqflags); in __kmem_cache_alloc_bulk()
4871 void *object = kfence_alloc(s, s->object_size, flags); in __kmem_cache_alloc_bulk()
4878 object = c->freelist; in __kmem_cache_alloc_bulk()
4881 * We may have removed an object from c->freelist using in __kmem_cache_alloc_bulk()
4883 * c->tid has not been bumped yet. in __kmem_cache_alloc_bulk()
4885 * allocating memory, we should bump c->tid now. in __kmem_cache_alloc_bulk()
4887 c->tid = next_tid(c->tid); in __kmem_cache_alloc_bulk()
4889 local_unlock_irqrestore(&s->cpu_slab->lock, irqflags); in __kmem_cache_alloc_bulk()
4892 * Invoking slow path likely have side-effect in __kmem_cache_alloc_bulk()
4893 * of re-populating per CPU c->freelist in __kmem_cache_alloc_bulk()
4896 _RET_IP_, c, s->object_size); in __kmem_cache_alloc_bulk()
4900 c = this_cpu_ptr(s->cpu_slab); in __kmem_cache_alloc_bulk()
4903 local_lock_irqsave(&s->cpu_slab->lock, irqflags); in __kmem_cache_alloc_bulk()
4905 continue; /* goto for-loop */ in __kmem_cache_alloc_bulk()
4907 c->freelist = get_freepointer(s, object); in __kmem_cache_alloc_bulk()
4912 c->tid = next_tid(c->tid); in __kmem_cache_alloc_bulk()
4913 local_unlock_irqrestore(&s->cpu_slab->lock, irqflags); in __kmem_cache_alloc_bulk()
4914 slub_put_cpu_ptr(s->cpu_slab); in __kmem_cache_alloc_bulk()
4919 slub_put_cpu_ptr(s->cpu_slab); in __kmem_cache_alloc_bulk()
4931 void *object = kfence_alloc(s, s->object_size, flags); in __kmem_cache_alloc_bulk()
4939 _RET_IP_, s->object_size); in __kmem_cache_alloc_bulk()
4976 slab_want_init_on_alloc(flags, s), s->object_size))) { in kmem_cache_alloc_bulk_noprof()
5083 return get_order(size * MAX_OBJS_PER_PAGE) - 1; in calculate_order()
5091 * smallest order from min_objects-derived/slab_min_order up to in calculate_order()
5113 return -ENOSYS; in calculate_order()
5119 n->nr_partial = 0; in init_kmem_cache_node()
5120 spin_lock_init(&n->list_lock); in init_kmem_cache_node()
5121 INIT_LIST_HEAD(&n->partial); in init_kmem_cache_node()
5123 atomic_long_set(&n->nr_slabs, 0); in init_kmem_cache_node()
5124 atomic_long_set(&n->total_objects, 0); in init_kmem_cache_node()
5125 INIT_LIST_HEAD(&n->full); in init_kmem_cache_node()
5140 s->cpu_slab = __alloc_percpu(sizeof(struct kmem_cache_cpu), in alloc_kmem_cache_cpus()
5143 if (!s->cpu_slab) in alloc_kmem_cache_cpus()
5173 BUG_ON(kmem_cache_node->size < sizeof(struct kmem_cache_node)); in early_kmem_cache_node_alloc()
5183 n = slab->freelist; in early_kmem_cache_node_alloc()
5189 slab->freelist = get_freepointer(kmem_cache_node, n); in early_kmem_cache_node_alloc()
5190 slab->inuse = 1; in early_kmem_cache_node_alloc()
5191 kmem_cache_node->node[node] = n; in early_kmem_cache_node_alloc()
5193 inc_slabs_node(kmem_cache_node, node, slab->objects); in early_kmem_cache_node_alloc()
5208 s->node[node] = NULL; in free_kmem_cache_nodes()
5217 free_percpu(s->cpu_slab); in __kmem_cache_release()
5242 s->node[node] = n; in init_kmem_cache_nodes()
5267 else if (s->size >= PAGE_SIZE) in set_cpu_partial()
5269 else if (s->size >= 1024) in set_cpu_partial()
5271 else if (s->size >= 256) in set_cpu_partial()
5286 slab_flags_t flags = s->flags; in calculate_sizes()
5287 unsigned int size = s->object_size; in calculate_sizes()
5304 !s->ctor) in calculate_sizes()
5305 s->flags |= __OBJECT_POISON; in calculate_sizes()
5307 s->flags &= ~__OBJECT_POISON; in calculate_sizes()
5315 if ((flags & SLAB_RED_ZONE) && size == s->object_size) in calculate_sizes()
5323 s->inuse = size; in calculate_sizes()
5325 if (((flags & SLAB_TYPESAFE_BY_RCU) && !args->use_freeptr_offset) || in calculate_sizes()
5326 (flags & SLAB_POISON) || s->ctor || in calculate_sizes()
5328 (s->object_size < sizeof(void *) || slub_debug_orig_size(s)))) { in calculate_sizes()
5340 * The assumption that s->offset >= s->inuse means free in calculate_sizes()
5345 s->offset = size; in calculate_sizes()
5347 } else if ((flags & SLAB_TYPESAFE_BY_RCU) && args->use_freeptr_offset) { in calculate_sizes()
5348 s->offset = args->freeptr_offset; in calculate_sizes()
5355 s->offset = ALIGN_DOWN(s->object_size / 2, sizeof(void *)); in calculate_sizes()
5372 kasan_cache_create(s, &size, &s->flags); in calculate_sizes()
5384 s->red_left_pad = sizeof(void *); in calculate_sizes()
5385 s->red_left_pad = ALIGN(s->red_left_pad, s->align); in calculate_sizes()
5386 size += s->red_left_pad; in calculate_sizes()
5395 size = ALIGN(size, s->align); in calculate_sizes()
5396 s->size = size; in calculate_sizes()
5397 s->reciprocal_size = reciprocal_value(size); in calculate_sizes()
5403 s->allocflags = __GFP_COMP; in calculate_sizes()
5405 if (s->flags & SLAB_CACHE_DMA) in calculate_sizes()
5406 s->allocflags |= GFP_DMA; in calculate_sizes()
5408 if (s->flags & SLAB_CACHE_DMA32) in calculate_sizes()
5409 s->allocflags |= GFP_DMA32; in calculate_sizes()
5411 if (s->flags & SLAB_RECLAIM_ACCOUNT) in calculate_sizes()
5412 s->allocflags |= __GFP_RECLAIMABLE; in calculate_sizes()
5417 s->oo = oo_make(order, size); in calculate_sizes()
5418 s->min = oo_make(get_order(size), size); in calculate_sizes()
5420 return !!oo_objects(s->oo); in calculate_sizes()
5430 slab_err(s, slab, text, s->name); in list_slab_objects()
5435 for_each_object(p, s, addr, slab->objects) { in list_slab_objects()
5440 pr_err("Object 0x%p @offset=%tu\n", p, p - addr); in list_slab_objects()
5459 spin_lock_irq(&n->list_lock); in free_partial()
5460 list_for_each_entry_safe(slab, h, &n->partial, slab_list) { in free_partial()
5461 if (!slab->inuse) { in free_partial()
5463 list_add(&slab->slab_list, &discard); in free_partial()
5469 spin_unlock_irq(&n->list_lock); in free_partial()
5481 if (n->nr_partial || node_nr_slabs(n)) in __kmem_cache_empty()
5498 if (n->nr_partial || node_nr_slabs(n)) in __kmem_cache_shutdown()
5512 struct kmem_cache *s = slab->slab_cache; in __kmem_obj_info()
5515 kpp->kp_ptr = object; in __kmem_obj_info()
5516 kpp->kp_slab = slab; in __kmem_obj_info()
5517 kpp->kp_slab_cache = s; in __kmem_obj_info()
5526 kpp->kp_data_offset = (unsigned long)((char *)objp0 - (char *)objp); in __kmem_obj_info()
5527 objp = base + s->size * objnr; in __kmem_obj_info()
5528 kpp->kp_objp = objp; in __kmem_obj_info()
5529 if (WARN_ON_ONCE(objp < base || objp >= base + slab->objects * s->size in __kmem_obj_info()
5530 || (objp - base) % s->size) || in __kmem_obj_info()
5531 !(s->flags & SLAB_STORE_USER)) in __kmem_obj_info()
5536 kpp->kp_ret = (void *)trackp->addr; in __kmem_obj_info()
5543 handle = READ_ONCE(trackp->handle); in __kmem_obj_info()
5547 kpp->kp_stack[i] = (void *)entries[i]; in __kmem_obj_info()
5551 handle = READ_ONCE(trackp->handle); in __kmem_obj_info()
5555 kpp->kp_free_stack[i] = (void *)entries[i]; in __kmem_obj_info()
5624 s = slab->slab_cache; in __check_heap_object()
5633 offset = ptr - kfence_object_start(ptr); in __check_heap_object()
5635 offset = (ptr - slab_address(slab)) % s->size; in __check_heap_object()
5639 if (offset < s->red_left_pad) in __check_heap_object()
5641 s->name, to_user, offset, n); in __check_heap_object()
5642 offset -= s->red_left_pad; in __check_heap_object()
5646 if (offset >= s->useroffset && in __check_heap_object()
5647 offset - s->useroffset <= s->usersize && in __check_heap_object()
5648 n <= s->useroffset - offset + s->usersize) in __check_heap_object()
5651 usercopy_abort("SLUB object", s->name, to_user, offset, n); in __check_heap_object()
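
__check_heap_object() above allows a usercopy of n bytes at a given offset inside an object only when the whole span falls inside the cache's [useroffset, useroffset + usersize) window (after the red-zone adjustment shown). A userspace model of just the window test, mirroring the three comparisons in the fragment:

        #include <stdbool.h>
        #include <stdio.h>

        static bool usercopy_ok(unsigned int useroffset, unsigned int usersize,
                                unsigned int offset, unsigned int n)
        {
                return offset >= useroffset &&
                       offset - useroffset <= usersize &&
                       n <= useroffset - offset + usersize;
        }

        int main(void)
        {
                /* Assumed window: [16, 48) within the object. */
                printf("%d\n", usercopy_ok(16, 32, 16, 32));  /* 1: whole window */
                printf("%d\n", usercopy_ok(16, 32, 24, 8));   /* 1: inside window */
                printf("%d\n", usercopy_ok(16, 32, 40, 16));  /* 0: runs past the end */
                printf("%d\n", usercopy_ok(16, 32, 8, 8));    /* 0: before the window */
                return 0;
        }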
5683 spin_lock_irqsave(&n->list_lock, flags); in __kmem_cache_do_shrink()
5689 * list_lock. slab->inuse here is the upper limit. in __kmem_cache_do_shrink()
5691 list_for_each_entry_safe(slab, t, &n->partial, slab_list) { in __kmem_cache_do_shrink()
5692 int free = slab->objects - slab->inuse; in __kmem_cache_do_shrink()
5694 /* Do not reread slab->inuse */ in __kmem_cache_do_shrink()
5700 if (free == slab->objects) { in __kmem_cache_do_shrink()
5701 list_move(&slab->slab_list, &discard); in __kmem_cache_do_shrink()
5703 n->nr_partial--; in __kmem_cache_do_shrink()
5704 dec_slabs_node(s, node, slab->objects); in __kmem_cache_do_shrink()
5706 list_move(&slab->slab_list, promote + free - 1); in __kmem_cache_do_shrink()
5713 for (i = SHRINK_PROMOTE_MAX - 1; i >= 0; i--) in __kmem_cache_do_shrink()
5714 list_splice(promote + i, &n->partial); in __kmem_cache_do_shrink()
5716 spin_unlock_irqrestore(&n->list_lock, flags); in __kmem_cache_do_shrink()
5754 offline_node = marg->status_change_nid_normal; in slab_mem_offline_callback()
5778 int nid = marg->status_change_nid_normal; in slab_mem_going_online_callback()
5808 ret = -ENOMEM; in slab_mem_going_online_callback()
5812 s->node[nid] = n; in slab_mem_going_online_callback()
5867 memcpy(s, static_cache, kmem_cache->object_size); in bootstrap()
5878 list_for_each_entry(p, &n->partial, slab_list) in bootstrap()
5879 p->slab_cache = s; in bootstrap()
5882 list_for_each_entry(p, &n->full, slab_list) in bootstrap()
5883 p->slab_cache = s; in bootstrap()
5886 list_add(&s->list, &slab_caches); in bootstrap()
5940 pr_info("SLUB: HWalign=%d, Order=%u-%u, MinObjects=%u, CPUs=%u, Nodes=%u\n", in kmem_cache_init()
5965 s->refcount++; in __kmem_cache_alias()
5971 s->object_size = max(s->object_size, size); in __kmem_cache_alias()
5972 s->inuse = max(s->inuse, ALIGN(size, sizeof(void *))); in __kmem_cache_alias()
5982 int err = -EINVAL; in do_kmem_cache_create()
5984 s->name = name; in do_kmem_cache_create()
5985 s->size = s->object_size = size; in do_kmem_cache_create()
5987 s->flags = kmem_cache_flags(flags, s->name); in do_kmem_cache_create()
5989 s->random = get_random_long(); in do_kmem_cache_create()
5991 s->align = args->align; in do_kmem_cache_create()
5992 s->ctor = args->ctor; in do_kmem_cache_create()
5994 s->useroffset = args->useroffset; in do_kmem_cache_create()
5995 s->usersize = args->usersize; in do_kmem_cache_create()
6005 if (get_order(s->size) > get_order(s->object_size)) { in do_kmem_cache_create()
6006 s->flags &= ~DEBUG_METADATA_FLAGS; in do_kmem_cache_create()
6007 s->offset = 0; in do_kmem_cache_create()
6014 if (system_has_freelist_aba() && !(s->flags & SLAB_NO_CMPXCHG)) { in do_kmem_cache_create()
6016 s->flags |= __CMPXCHG_DOUBLE; in do_kmem_cache_create()
6024 s->min_partial = min_t(unsigned long, MAX_PARTIAL, ilog2(s->size) / 2); in do_kmem_cache_create()
6025 s->min_partial = max_t(unsigned long, MIN_PARTIAL, s->min_partial); in do_kmem_cache_create()
6030 s->remote_node_defrag_ratio = 1000; in do_kmem_cache_create()
6033 /* Initialize the pre-computed randomized freelist if slab is up */ in do_kmem_cache_create()
6055 if (s->flags & SLAB_STORE_USER) in do_kmem_cache_create()
6067 return slab->inuse; in count_inuse()
6072 return slab->objects; in count_total()
6088 for_each_object(p, s, addr, slab->objects) { in validate_slab()
6104 spin_lock_irqsave(&n->list_lock, flags); in validate_slab_node()
6106 list_for_each_entry(slab, &n->partial, slab_list) { in validate_slab_node()
6110 if (count != n->nr_partial) { in validate_slab_node()
6112 s->name, count, n->nr_partial); in validate_slab_node()
6116 if (!(s->flags & SLAB_STORE_USER)) in validate_slab_node()
6119 list_for_each_entry(slab, &n->full, slab_list) { in validate_slab_node()
6125 s->name, count, node_nr_slabs(n)); in validate_slab_node()
6130 spin_unlock_irqrestore(&n->list_lock, flags); in validate_slab_node()
6141 obj_map = bitmap_alloc(oo_objects(s->oo), GFP_KERNEL); in validate_slab_cache()
6143 return -ENOMEM; in validate_slab_cache()
6186 if (t->max) in free_loc_track()
6187 free_pages((unsigned long)t->loc, in free_loc_track()
6188 get_order(sizeof(struct location) * t->max)); in free_loc_track()
6202 if (t->count) { in alloc_loc_track()
6203 memcpy(l, t->loc, sizeof(struct location) * t->count); in alloc_loc_track()
6206 t->max = max; in alloc_loc_track()
6207 t->loc = l; in alloc_loc_track()
6218 unsigned long age = jiffies - track->when; in add_location()
6220 unsigned int waste = s->object_size - orig_size; in add_location()
6223 handle = READ_ONCE(track->handle); in add_location()
6225 start = -1; in add_location()
6226 end = t->count; in add_location()
6229 pos = start + (end - start + 1) / 2; in add_location()
6238 l = &t->loc[pos]; in add_location()
6239 caddr = l->addr; in add_location()
6240 chandle = l->handle; in add_location()
6241 cwaste = l->waste; in add_location()
6242 if ((track->addr == caddr) && (handle == chandle) && in add_location()
6245 l->count++; in add_location()
6246 if (track->when) { in add_location()
6247 l->sum_time += age; in add_location()
6248 if (age < l->min_time) in add_location()
6249 l->min_time = age; in add_location()
6250 if (age > l->max_time) in add_location()
6251 l->max_time = age; in add_location()
6253 if (track->pid < l->min_pid) in add_location()
6254 l->min_pid = track->pid; in add_location()
6255 if (track->pid > l->max_pid) in add_location()
6256 l->max_pid = track->pid; in add_location()
6258 cpumask_set_cpu(track->cpu, in add_location()
6259 to_cpumask(l->cpus)); in add_location()
6261 node_set(page_to_nid(virt_to_page(track)), l->nodes); in add_location()
6265 if (track->addr < caddr) in add_location()
6267 else if (track->addr == caddr && handle < chandle) in add_location()
6269 else if (track->addr == caddr && handle == chandle && in add_location()
6279 if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC)) in add_location()
6282 l = t->loc + pos; in add_location()
6283 if (pos < t->count) in add_location()
6285 (t->count - pos) * sizeof(struct location)); in add_location()
6286 t->count++; in add_location()
6287 l->count = 1; in add_location()
6288 l->addr = track->addr; in add_location()
6289 l->sum_time = age; in add_location()
6290 l->min_time = age; in add_location()
6291 l->max_time = age; in add_location()
6292 l->min_pid = track->pid; in add_location()
6293 l->max_pid = track->pid; in add_location()
6294 l->handle = handle; in add_location()
6295 l->waste = waste; in add_location()
6296 cpumask_clear(to_cpumask(l->cpus)); in add_location()
6297 cpumask_set_cpu(track->cpu, to_cpumask(l->cpus)); in add_location()
6298 nodes_clear(l->nodes); in add_location()
6299 node_set(page_to_nid(virt_to_page(track)), l->nodes); in add_location()
6313 for_each_object(p, s, addr, slab->objects) in process_slab()
6317 s->object_size); in process_slab()
6348 return -ENOMEM; in show_slab_objects()
6354 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, in show_slab_objects()
6359 slab = READ_ONCE(c->slab); in show_slab_objects()
6365 x = slab->objects; in show_slab_objects()
6367 x = slab->inuse; in show_slab_objects()
6383 x = data_race(slab->slabs); in show_slab_objects()
6395 * mem_hotplug_lock->slab_mutex->kernfs_mutex in show_slab_objects()
6399 * unplug code doesn't destroy the kmem_cache->node[] data. in show_slab_objects()
6411 x = node_nr_objs(n) - count_partial(n, count_free); in show_slab_objects()
6429 x = n->nr_partial; in show_slab_objects()
6466 return sysfs_emit(buf, "%u\n", s->size); in slab_size_show()
6472 return sysfs_emit(buf, "%u\n", s->align); in align_show()
6478 return sysfs_emit(buf, "%u\n", s->object_size); in object_size_show()
6484 return sysfs_emit(buf, "%u\n", oo_objects(s->oo)); in objs_per_slab_show()
6490 return sysfs_emit(buf, "%u\n", oo_order(s->oo)); in order_show()
6496 return sysfs_emit(buf, "%lu\n", s->min_partial); in min_partial_show()
6509 s->min_partial = min; in min_partial_store()
6518 nr_partial = s->cpu_partial; in cpu_partial_show()
6534 return -EINVAL; in cpu_partial_store()
6544 if (!s->ctor) in ctor_show()
6546 return sysfs_emit(buf, "%pS\n", s->ctor); in ctor_show()
6552 return sysfs_emit(buf, "%d\n", s->refcount < 0 ? 0 : s->refcount - 1); in aliases_show()
6585 slab = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu)); in slabs_cpu_partial_show()
6588 slabs += data_race(slab->slabs); in slabs_cpu_partial_show()
6592 /* Approximate half-full slabs, see slub_set_cpu_partial() */ in slabs_cpu_partial_show()
6593 objects = (slabs * oo_objects(s->oo)) / 2; in slabs_cpu_partial_show()
6600 slab = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu)); in slabs_cpu_partial_show()
6602 slabs = data_race(slab->slabs); in slabs_cpu_partial_show()
6603 objects = (slabs * oo_objects(s->oo)) / 2; in slabs_cpu_partial_show()
6617 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT)); in reclaim_account_show()
6623 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN)); in hwcache_align_show()
6630 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA)); in cache_dma_show()
6638 return sysfs_emit(buf, "%u\n", s->usersize); in usersize_show()
6645 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_TYPESAFE_BY_RCU)); in destroy_by_rcu_show()
6670 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_CONSISTENCY_CHECKS)); in sanity_checks_show()
6676 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_TRACE)); in trace_show()
6682 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE)); in red_zone_show()
6689 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_POISON)); in poison_show()
6696 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_STORE_USER)); in store_user_show()
6709 int ret = -EINVAL; in validate_store()
6725 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_FAILSLAB)); in failslab_show()
6731 if (s->refcount > 1) in failslab_store()
6732 return -EINVAL; in failslab_store()
6735 WRITE_ONCE(s->flags, s->flags | SLAB_FAILSLAB); in failslab_store()
6737 WRITE_ONCE(s->flags, s->flags & ~SLAB_FAILSLAB); in failslab_store()
6755 return -EINVAL; in shrink_store()
6763 return sysfs_emit(buf, "%u\n", s->remote_node_defrag_ratio / 10); in remote_node_defrag_ratio_show()
6776 return -ERANGE; in remote_node_defrag_ratio_store()
6778 s->remote_node_defrag_ratio = ratio * 10; in remote_node_defrag_ratio_store()
6794 return -ENOMEM; in show_stat()
6797 unsigned x = per_cpu_ptr(s->cpu_slab, cpu)->stat[si]; in show_stat()
6823 per_cpu_ptr(s->cpu_slab, cpu)->stat[si] = 0; in clear_stat()
6835 return -EINVAL; \
6872 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_SKIP_KFENCE)); in skip_kfence_show()
6881 s->flags &= ~SLAB_SKIP_KFENCE; in skip_kfence_store()
6883 s->flags |= SLAB_SKIP_KFENCE; in skip_kfence_store()
6885 ret = -EINVAL; in skip_kfence_store()
6982 if (!attribute->show) in slab_attr_show()
6983 return -EIO; in slab_attr_show()
6985 return attribute->show(s, buf); in slab_attr_show()
6998 if (!attribute->store) in slab_attr_store()
6999 return -EIO; in slab_attr_store()
7001 return attribute->store(s, buf, len); in slab_attr_store()
7030 * Format :[flags-]size
7038 return ERR_PTR(-ENOMEM); in create_unique_id()
7048 if (s->flags & SLAB_CACHE_DMA) in create_unique_id()
7050 if (s->flags & SLAB_CACHE_DMA32) in create_unique_id()
7052 if (s->flags & SLAB_RECLAIM_ACCOUNT) in create_unique_id()
7054 if (s->flags & SLAB_CONSISTENCY_CHECKS) in create_unique_id()
7056 if (s->flags & SLAB_ACCOUNT) in create_unique_id()
7059 *p++ = '-'; in create_unique_id()
7060 p += snprintf(p, ID_STR_LENGTH - (p - name), "%07u", s->size); in create_unique_id()
7062 if (WARN_ON(p > name + ID_STR_LENGTH - 1)) { in create_unique_id()
7064 return ERR_PTR(-EINVAL); in create_unique_id()
7066 kmsan_unpoison_memory(name, p - name); in create_unique_id()
7087 sysfs_remove_link(&slab_kset->kobj, s->name); in sysfs_slab_add()
7088 name = s->name; in sysfs_slab_add()
7099 s->kobj.kset = kset; in sysfs_slab_add()
7100 err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name); in sysfs_slab_add()
7104 err = sysfs_create_group(&s->kobj, &slab_attr_group); in sysfs_slab_add()
7110 sysfs_slab_alias(s, s->name); in sysfs_slab_add()
7117 kobject_del(&s->kobj); in sysfs_slab_add()
7123 kobject_del(&s->kobj); in sysfs_slab_unlink()
7128 kobject_put(&s->kobj); in sysfs_slab_release()
7151 sysfs_remove_link(&slab_kset->kobj, name); in sysfs_slab_alias()
7152 return sysfs_create_link(&slab_kset->kobj, &s->kobj, name); in sysfs_slab_alias()
7157 return -ENOMEM; in sysfs_slab_alias()
7159 al->s = s; in sysfs_slab_alias()
7160 al->name = name; in sysfs_slab_alias()
7161 al->next = alias_list; in sysfs_slab_alias()
7178 return -ENOMEM; in slab_sysfs_init()
7187 s->name); in slab_sysfs_init()
7193 alias_list = alias_list->next; in slab_sysfs_init()
7194 err = sysfs_slab_alias(al->s, al->name); in slab_sysfs_init()
7197 al->name); in slab_sysfs_init()
7210 struct loc_track *t = seq->private; in slab_debugfs_show()
7214 idx = (unsigned long) t->idx; in slab_debugfs_show()
7215 if (idx < t->count) { in slab_debugfs_show()
7216 l = &t->loc[idx]; in slab_debugfs_show()
7218 seq_printf(seq, "%7ld ", l->count); in slab_debugfs_show()
7220 if (l->addr) in slab_debugfs_show()
7221 seq_printf(seq, "%pS", (void *)l->addr); in slab_debugfs_show()
7223 seq_puts(seq, "<not-available>"); in slab_debugfs_show()
7225 if (l->waste) in slab_debugfs_show()
7227 l->count * l->waste, l->waste); in slab_debugfs_show()
7229 if (l->sum_time != l->min_time) { in slab_debugfs_show()
7231 l->min_time, div_u64(l->sum_time, l->count), in slab_debugfs_show()
7232 l->max_time); in slab_debugfs_show()
7234 seq_printf(seq, " age=%ld", l->min_time); in slab_debugfs_show()
7236 if (l->min_pid != l->max_pid) in slab_debugfs_show()
7237 seq_printf(seq, " pid=%ld-%ld", l->min_pid, l->max_pid); in slab_debugfs_show()
7240 l->min_pid); in slab_debugfs_show()
7242 if (num_online_cpus() > 1 && !cpumask_empty(to_cpumask(l->cpus))) in slab_debugfs_show()
7244 cpumask_pr_args(to_cpumask(l->cpus))); in slab_debugfs_show()
7246 if (nr_online_nodes > 1 && !nodes_empty(l->nodes)) in slab_debugfs_show()
7248 nodemask_pr_args(&l->nodes)); in slab_debugfs_show()
7256 handle = READ_ONCE(l->handle); in slab_debugfs_show()
7268 if (!idx && !t->count) in slab_debugfs_show()
7280 struct loc_track *t = seq->private; in slab_debugfs_next()
7282 t->idx = ++(*ppos); in slab_debugfs_next()
7283 if (*ppos <= t->count) in slab_debugfs_next()
7294 if (loc1->count > loc2->count) in cmp_loc_by_count()
7295 return -1; in cmp_loc_by_count()
7302 struct loc_track *t = seq->private; in slab_debugfs_start()
7304 t->idx = *ppos; in slab_debugfs_start()
7323 struct kmem_cache *s = file_inode(filep)->i_private; in slab_debug_trace_open()
7327 return -ENOMEM; in slab_debug_trace_open()
7329 obj_map = bitmap_alloc(oo_objects(s->oo), GFP_KERNEL); in slab_debug_trace_open()
7332 return -ENOMEM; in slab_debug_trace_open()
7335 if (strcmp(filep->f_path.dentry->d_name.name, "alloc_traces") == 0) in slab_debug_trace_open()
7343 return -ENOMEM; in slab_debug_trace_open()
7353 spin_lock_irqsave(&n->list_lock, flags); in slab_debug_trace_open()
7354 list_for_each_entry(slab, &n->partial, slab_list) in slab_debug_trace_open()
7356 list_for_each_entry(slab, &n->full, slab_list) in slab_debug_trace_open()
7358 spin_unlock_irqrestore(&n->list_lock, flags); in slab_debug_trace_open()
7362 sort_r(t->loc, t->count, sizeof(struct location), in slab_debug_trace_open()
7371 struct seq_file *seq = file->private_data; in slab_debug_trace_release()
7372 struct loc_track *t = seq->private; in slab_debug_trace_release()
7392 slab_cache_dir = debugfs_create_dir(s->name, slab_debugfs_root); in debugfs_slab_add()
7403 debugfs_lookup_and_remove(s->name, slab_debugfs_root); in debugfs_slab_release()
7413 if (s->flags & SLAB_STORE_USER) in slab_debugfs_init()
7439 sinfo->active_objs = nr_objs - nr_free; in get_slabinfo()
7440 sinfo->num_objs = nr_objs; in get_slabinfo()
7441 sinfo->active_slabs = nr_slabs; in get_slabinfo()
7442 sinfo->num_slabs = nr_slabs; in get_slabinfo()
7443 sinfo->objects_per_slab = oo_objects(s->oo); in get_slabinfo()
7444 sinfo->cache_order = oo_order(s->oo); in get_slabinfo()