Searched refs:slab (Results 1 – 25 of 98) sorted by relevance

/linux-6.12.1/mm/
slab.h  52 struct slab { struct
62 struct slab *next; argument
96 static_assert(offsetof(struct page, pg) == offsetof(struct slab, sl)) argument
106 static_assert(sizeof(struct slab) <= sizeof(struct page));
108 static_assert(IS_ALIGNED(offsetof(struct slab, freelist), sizeof(freelist_aba_t)));
121 const struct folio *: (const struct slab *)(folio), \
122 struct folio *: (struct slab *)(folio)))
136 const struct slab *: (const struct folio *)s, \
137 struct slab *: (struct folio *)s))
152 const struct page *: (const struct slab *)(p), \
[all …]
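The static_asserts in these mm/slab.h hits exist because struct slab reuses the memory of struct page, and the folio/slab conversion macros are checked casts between the two views of the same memory. Below is a minimal, hedged standalone C sketch of that overlay idea; toy_page, toy_slab and their fields are invented for illustration and do not match the kernel's real layout.

    #include <assert.h>
    #include <stddef.h>

    /* Toy stand-ins for struct page and struct slab (illustrative only). */
    struct toy_page {
        unsigned long flags;
        void *first;
    };

    struct toy_slab {
        unsigned long __page_flags;   /* must overlay toy_page.flags */
        void *freelist;               /* must overlay toy_page.first */
    };

    /* The same kind of compile-time checks as the static_asserts above. */
    static_assert(sizeof(struct toy_slab) <= sizeof(struct toy_page),
                  "slab view must fit inside the page it overlays");
    static_assert(offsetof(struct toy_slab, __page_flags) ==
                  offsetof(struct toy_page, flags),
                  "flag words must line up across both views");

    /* Converting between the views is a cast, as in folio_slab()/slab_folio(). */
    static inline struct toy_slab *toy_page_to_slab(struct toy_page *p)
    {
        return (struct toy_slab *)p;
    }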
slub.c  392 struct slab *slab; /* The slab from which we are allocating */ member
394 struct slab *partial; /* Partially allocated slabs */
641 static __always_inline void slab_lock(struct slab *slab) in slab_lock() argument
643 bit_spin_lock(PG_locked, &slab->__page_flags); in slab_lock()
646 static __always_inline void slab_unlock(struct slab *slab) in slab_unlock() argument
648 bit_spin_unlock(PG_locked, &slab->__page_flags); in slab_unlock()
652 __update_freelist_fast(struct slab *slab, in __update_freelist_fast() argument
660 return try_cmpxchg_freelist(&slab->freelist_counter.full, &old.full, new.full); in __update_freelist_fast()
667 __update_freelist_slow(struct slab *slab, in __update_freelist_slow() argument
673 slab_lock(slab); in __update_freelist_slow()
[all …]
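The slub.c hits show the two freelist-update paths: a lockless fast path built on a double-word cmpxchg of the freelist pointer plus a counter, and a slow path that takes the per-slab bit spinlock and swaps the pair only if it is unchanged. A hedged userspace sketch of the slow-path shape follows; toy_slab, the single counter field and the pthread mutex standing in for slab_lock()/slab_unlock() are simplifications, not SLUB's real layout.

    #include <pthread.h>
    #include <stdbool.h>

    struct toy_slab {
        pthread_mutex_t lock;    /* stands in for the PG_locked bit spinlock */
        void *freelist;          /* first free object */
        unsigned long counter;   /* packed objects/inuse/frozen bookkeeping */
    };

    /* Replace (freelist, counter) only if nobody changed it in the meantime. */
    bool toy_update_freelist_slow(struct toy_slab *slab,
                                  void *old_freelist, unsigned long old_counter,
                                  void *new_freelist, unsigned long new_counter)
    {
        bool ok = false;

        pthread_mutex_lock(&slab->lock);
        if (slab->freelist == old_freelist && slab->counter == old_counter) {
            slab->freelist = new_freelist;
            slab->counter = new_counter;
            ok = true;
        }
        pthread_mutex_unlock(&slab->lock);
        return ok;
    }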
/linux-6.12.1/drivers/md/dm-vdo/
slab-depot.c  54 static bool is_slab_open(struct vdo_slab *slab) in is_slab_open() argument
56 return (!vdo_is_state_quiescing(&slab->state) && in is_slab_open()
57 !vdo_is_state_quiescent(&slab->state)); in is_slab_open()
68 return ((journal->slab->status != VDO_SLAB_REBUILDING) && in must_make_entries_to_flush()
135 static bool is_slab_journal_blank(const struct vdo_slab *slab) in is_slab_journal_blank() argument
137 return ((slab->journal.tail == 1) && in is_slab_journal_blank()
138 (slab->journal.tail_header.entry_count == 0)); in is_slab_journal_blank()
150 struct list_head *dirty_list = &journal->slab->allocator->dirty_slab_journals; in mark_slab_journal_dirty()
169 static void check_if_slab_drained(struct vdo_slab *slab) in check_if_slab_drained() argument
172 struct slab_journal *journal = &slab->journal; in check_if_slab_drained()
[all …]
slab-depot.h  77 struct vdo_slab *slab; member
160 struct vdo_slab *slab; member
293 struct vdo_slab *slab; member
506 bool __must_check vdo_attempt_replay_into_slab(struct vdo_slab *slab,
523 int __must_check vdo_acquire_provisional_reference(struct vdo_slab *slab,
/linux-6.12.1/tools/mm/
slabinfo.c  54 struct slabinfo *slab; member
357 if (a->slab == find && in find_one_alias()
1143 a->slab = s; in link_slabs()
1162 if (!show_single_ref && a->slab->refs == 1) in alias()
1167 if (strcmp(a->slab->name, active) == 0) { in alias()
1172 printf("\n%-12s <- %s", a->slab->name, a->name); in alias()
1173 active = a->slab->name; in alias()
1176 printf("%-15s -> %s\n", a->name, a->slab->name); in alias()
1206 static int slab_mismatch(char *slab) in slab_mismatch() argument
1208 return regexec(&pattern, slab, 0, NULL, 0); in slab_mismatch()
[all …]
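slab_mismatch() in slabinfo.c filters cache names with a precompiled POSIX regex: regexec() returns 0 on a match and non-zero otherwise. A small self-contained sketch of the same filtering idea; the pattern string and the name list are made up for illustration.

    #include <regex.h>
    #include <stdio.h>

    int main(void)
    {
        const char *names[] = { "kmalloc-64", "dentry", "inode_cache" };
        regex_t pattern;

        /* Keep only caches whose name starts with "kmalloc". */
        if (regcomp(&pattern, "^kmalloc", REG_EXTENDED | REG_NOSUB))
            return 1;

        for (size_t i = 0; i < sizeof(names) / sizeof(names[0]); i++) {
            /* Non-zero from regexec() means "mismatch", as in slab_mismatch(). */
            if (regexec(&pattern, names[i], 0, NULL, 0) == 0)
                printf("%s matches\n", names[i]);
        }

        regfree(&pattern);
        return 0;
    }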
/linux-6.12.1/Documentation/ABI/testing/
sysfs-kernel-slab  1 What: /sys/kernel/slab
7 The /sys/kernel/slab directory contains a snapshot of the
13 What: /sys/kernel/slab/<cache>/aliases
22 What: /sys/kernel/slab/<cache>/align
31 What: /sys/kernel/slab/<cache>/alloc_calls
42 What: /sys/kernel/slab/<cache>/alloc_fastpath
53 What: /sys/kernel/slab/<cache>/alloc_from_partial
59 The alloc_from_partial file shows how many times a cpu slab has
60 been full and it has been refilled by using a slab from the list
65 What: /sys/kernel/slab/<cache>/alloc_refill
[all …]
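Each of these sysfs entries is a plain text file, so inspecting a cache amounts to reading /sys/kernel/slab/<cache>/<attribute>. A hedged sketch that reads one attribute of one cache; the cache name "kmalloc-64" and the "object_size" attribute are only examples, and which attributes exist can depend on the kernel configuration.

    #include <stdio.h>

    int main(void)
    {
        const char *path = "/sys/kernel/slab/kmalloc-64/object_size";
        char buf[64];
        FILE *f = fopen(path, "r");

        if (!f) {
            perror(path);
            return 1;
        }
        if (fgets(buf, sizeof(buf), f))
            printf("%s: %s", path, buf);
        fclose(f);
        return 0;
    }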
/linux-6.12.1/mm/kasan/
common.c  35 struct slab *kasan_addr_to_slab(const void *addr) in kasan_addr_to_slab()
145 void __kasan_poison_slab(struct slab *slab) in __kasan_poison_slab() argument
147 struct page *page = slab_page(slab); in __kasan_poison_slab()
440 struct slab *slab; in __kasan_krealloc() local
458 slab = virt_to_slab(object); in __kasan_krealloc()
461 if (unlikely(!slab)) in __kasan_krealloc()
464 poison_kmalloc_redzone(slab->slab_cache, object, size, flags); in __kasan_krealloc()
501 struct slab *slab; in __kasan_mempool_poison_object() local
517 slab = folio_slab(folio); in __kasan_mempool_poison_object()
519 if (check_slab_allocation(slab->slab_cache, ptr, ip)) in __kasan_mempool_poison_object()
[all …]
report.c  498 struct slab *slab; in complete_report_info() local
506 slab = kasan_addr_to_slab(addr); in complete_report_info()
507 if (slab) { in complete_report_info()
508 info->cache = slab->slab_cache; in complete_report_info()
509 info->object = nearest_obj(info->cache, slab, addr); in complete_report_info()
generic.c  526 struct slab *slab = kasan_addr_to_slab(addr); in __kasan_record_aux_stack() local
531 if (is_kfence_address(addr) || !slab) in __kasan_record_aux_stack()
534 cache = slab->slab_cache; in __kasan_record_aux_stack()
535 object = nearest_obj(cache, slab, addr); in __kasan_record_aux_stack()
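Both KASAN hits above resolve an arbitrary address back to its containing slab object via nearest_obj(). The core of that lookup is integer arithmetic: subtract the slab's base address, divide by the object size, and round back down to an object boundary. A simplified sketch under the assumption that objects are packed back to back from a base address; the real helper also accounts for the cache's layout details.

    #include <stddef.h>
    #include <stdint.h>

    void *toy_nearest_obj(void *slab_base, size_t obj_size, void *addr)
    {
        uintptr_t base = (uintptr_t)slab_base;
        uintptr_t a = (uintptr_t)addr;
        size_t idx = (a - base) / obj_size;   /* which object contains addr */

        return (void *)(base + idx * obj_size);
    }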
/linux-6.12.1/scripts/gdb/linux/
slab.py  38 def slab_folio(slab): argument
39 return slab.cast(gdb.lookup_type("struct folio").pointer())
41 def slab_address(slab): argument
43 folio = slab_folio(slab)
155 def __fill_map(obj_map, cache, slab): argument
156 p = slab['freelist']
157 addr = slab_address(slab)
165 for slab in lists.list_for_each_entry(slab_list, slab_ptr_type, "slab_list"):
167 __fill_map(obj_map, cache, slab)
168 addr = slab_address(slab)
[all …]
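__fill_map() in the gdb script walks a slab's freelist, where each free object stores a pointer to the next free object, and marks everything it reaches as free. A C rendering of the same walk; the fixed-size toy layout and the next pointer sitting at offset 0 of each free object are simplifying assumptions.

    #include <stdbool.h>
    #include <stddef.h>

    #define OBJ_SIZE   64
    #define NR_OBJECTS 16

    struct toy_slab {
        char objects[NR_OBJECTS][OBJ_SIZE];   /* the slab's memory */
        void *freelist;                       /* first free object, or NULL */
    };

    void fill_map(bool free_map[NR_OBJECTS], struct toy_slab *slab)
    {
        char *base = slab->objects[0];
        void *p;

        /* Every object reachable through the freelist chain is free. */
        for (p = slab->freelist; p; p = *(void **)p) {
            size_t idx = (size_t)((char *)p - base) / OBJ_SIZE;

            if (idx < NR_OBJECTS)
                free_map[idx] = true;
        }
    }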
/linux-6.12.1/tools/cgroup/
memcg_slabinfo.py  73 for slab in list_for_each_entry('struct slab', n.partial.address_of_(),
75 nr_objs += fn(slab)
79 def count_free(slab): argument
80 return slab.objects - slab.inuse
194 for slab in for_each_slab(prog):
195 objcg_vec_raw = slab.memcg_data.value_()
198 cache = slab.slab_cache
/linux-6.12.1/lib/
sg_pool.c  13 struct kmem_cache *slab; member
150 sgp->slab = kmem_cache_create(sgp->name, size, 0, in sg_pool_init()
152 if (!sgp->slab) { in sg_pool_init()
159 sgp->slab); in sg_pool_init()
174 kmem_cache_destroy(sgp->slab); in sg_pool_init()
/linux-6.12.1/include/linux/
kfence.h  221 bool __kfence_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab);
245 static inline bool __kfence_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab) in __kfence_obj_info() argument
kasan.h  14 struct slab;
126 void __kasan_poison_slab(struct slab *slab);
127 static __always_inline void kasan_poison_slab(struct slab *slab) in kasan_poison_slab() argument
130 __kasan_poison_slab(slab); in kasan_poison_slab()
410 static inline void kasan_poison_slab(struct slab *slab) {} in kasan_poison_slab() argument
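The kasan.h hits illustrate a common kernel header pattern: the real work lives in a __-prefixed function, a static inline wrapper calls it when the feature is built in (the real wrapper also checks kasan_enabled() at runtime), and an empty static inline stub replaces it when the feature is compiled out, so callers never need #ifdefs. A generic sketch of that pattern with invented CONFIG_TOY_FEATURE/toy_* names:

    struct toy_slab;

    #ifdef CONFIG_TOY_FEATURE
    /* Out-of-line implementation, compiled only when the feature is enabled. */
    void __toy_poison_slab(struct toy_slab *slab);

    static inline void toy_poison_slab(struct toy_slab *slab)
    {
        __toy_poison_slab(slab);
    }
    #else
    /* Feature compiled out: the call sites optimize away to nothing. */
    static inline void toy_poison_slab(struct toy_slab *slab) { }
    #endif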
/linux-6.12.1/Documentation/mm/
slub.rst  7 slab caches. SLUB always includes full debugging but it is off by default.
38 slab_debug=<Debug-Options>,<slab name1>,<slab name2>,...
45 of the first "select slabs" blocks that matches the slab's name are applied.
57 caused higher minimum slab orders
70 end of the slab name, in order to cover all slabs with the same prefix. For
76 Red zoning and tracking may realign the slab. We can just apply sanity checks
81 Debugging options may require the minimum possible slab order to increase as
83 sizes). This has a higher likelihood of resulting in slab allocation errors
89 You can apply different options to different list of slab names, using blocks
97 debugged by specifying global debug options followed by a list of slab names
[all …]
/linux-6.12.1/Documentation/translations/zh_TW/dev-tools/
kasan.rst  64 Generic KASAN supports finding bugs in all of slab, page_alloc, vmap, vmalloc, stack and global memory
67 Software Tag-Based KASAN supports slab, page_alloc, vmalloc and stack memory.
69 Hardware Tag-Based KASAN supports slab, page_alloc and non-executable vmalloc memory.
71 For slab, both software KASAN modes support the SLUB and SLAB allocators, while Hardware Tag-Based
89 To include alloc and free stack traces of affected slab objects in reports, enable
140 BUG: KASAN: slab-out-of-bounds in kmalloc_oob_right+0xa8/0xbc [kasan_test]
196 flags: 0x200000000000100(slab)
211 stack trace, a stack trace of where the accessed memory was allocated (in case a slab object was accessed), and the object
213 a description of the slab object, and information about the accessed memory page.
231 Note that KASAN bug titles (such as ``slab-out-of-bounds`` or ``use-after-free``)
/linux-6.12.1/Documentation/translations/zh_CN/dev-tools/
kasan.rst  64 Generic KASAN supports finding bugs in all of slab, page_alloc, vmap, vmalloc, stack and global memory
67 Software Tag-Based KASAN supports slab, page_alloc, vmalloc and stack memory.
69 Hardware Tag-Based KASAN supports slab, page_alloc and non-executable vmalloc memory.
71 For slab, both software KASAN modes support the SLUB and SLAB allocators, while Hardware Tag-Based
89 To include alloc and free stack traces of affected slab objects in reports, enable
140 BUG: KASAN: slab-out-of-bounds in kmalloc_oob_right+0xa8/0xbc [kasan_test]
196 flags: 0x200000000000100(slab)
211 stack trace, a stack trace of where the accessed memory was allocated (in case a slab object was accessed), and the object
213 a description of the slab object, and information about the accessed memory page.
231 Note that KASAN bug titles (such as ``slab-out-of-bounds`` or ``use-after-free``)
/linux-6.12.1/net/dccp/
ccid.c  81 struct kmem_cache *slab; in ccid_kmem_cache_create() local
88 slab = kmem_cache_create(slab_name_fmt, sizeof(struct ccid) + obj_size, 0, in ccid_kmem_cache_create()
90 return slab; in ccid_kmem_cache_create()
93 static void ccid_kmem_cache_destroy(struct kmem_cache *slab) in ccid_kmem_cache_destroy() argument
95 kmem_cache_destroy(slab); in ccid_kmem_cache_destroy()
/linux-6.12.1/mm/kfence/
core.c  419 struct slab *slab; in kfence_guarded_alloc() local
487 slab = virt_to_slab((void *)meta->addr); in kfence_guarded_alloc()
488 slab->slab_cache = cache; in kfence_guarded_alloc()
489 slab->objects = 1; in kfence_guarded_alloc()
615 struct slab *slab = page_slab(nth_page(pages, i)); in kfence_init_pool() local
620 __folio_set_slab(slab_folio(slab)); in kfence_init_pool()
622 slab->obj_exts = (unsigned long)&kfence_metadata_init[i / 2 - 1].obj_exts | in kfence_init_pool()
667 struct slab *slab = page_slab(nth_page(pages, i)); in kfence_init_pool() local
672 slab->obj_exts = 0; in kfence_init_pool()
674 __folio_clear_slab(slab_folio(slab)); in kfence_init_pool()
/linux-6.12.1/tools/perf/Documentation/
perf-kmem.txt  47 Sort the output (default: 'frag,hit,bytes' for slab and 'bytes,hit'
49 pingpong, frag' for slab and 'page, callsite, bytes, hit, order,
51 mode selection options - i.e. --slab, --page, --alloc and/or --caller.
60 --slab::
/linux-6.12.1/tools/testing/scatterlist/
Makefile  17 … $(OFILES) scatterlist.c linux/scatterlist.h linux/highmem.h linux/kmemleak.h linux/slab.h asm/io.h
31 @touch linux/slab.h
/linux-6.12.1/Documentation/translations/zh_CN/mm/
split_page_table_lock.rst  62 Make sure the architecture does not use the slab allocator to allocate page tables: slab uses page->slab_cache for its pages
/linux-6.12.1/Documentation/translations/zh_CN/core-api/
mm-api.rst  49 include/linux/slab.h
51 mm/slab.c
memory-allocation.rst  131 If you need to allocate many identical objects, you can use the slab cache allocator. Before the cache can be used, it should be set up with
137 and memory allocated with `kvmalloc`. Slab caches should be freed with kmem_cache_free(). Don't forget to
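As the memory-allocation.rst excerpt says, a slab cache is created once with kmem_cache_create(), objects come from it with kmem_cache_alloc() and go back with kmem_cache_free(), and the cache itself is torn down with kmem_cache_destroy(). A hedged in-kernel sketch of that lifecycle; struct foo, the cache name and the flag choice are illustrative.

    #include <linux/errno.h>
    #include <linux/slab.h>

    struct foo {
        int a, b;
    };

    static struct kmem_cache *foo_cache;

    static int foo_setup(void)
    {
        foo_cache = kmem_cache_create("foo_cache", sizeof(struct foo), 0,
                                      SLAB_HWCACHE_ALIGN, NULL);
        return foo_cache ? 0 : -ENOMEM;
    }

    static void foo_use(void)
    {
        struct foo *f = kmem_cache_alloc(foo_cache, GFP_KERNEL);

        if (f)
            kmem_cache_free(foo_cache, f);   /* objects go back to their cache */
    }

    static void foo_teardown(void)
    {
        kmem_cache_destroy(foo_cache);       /* and the cache itself is destroyed */
    }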
/linux-6.12.1/drivers/net/ethernet/chelsio/inline_crypto/chtls/
chtls_cm.h  129 chtls_tcp_ops->slab = tcp_prot->rsk_prot->slab; in chtls_init_rsk_ops()
137 kmem_cache_free(req->rsk_ops->slab, req); in chtls_reqsk_free()
