Lines Matching refs:slab (identifier cross-reference; the fragments below appear to come from the Linux kernel's internal slab header, mm/slab.h, with the referencing function or kind noted at the end of each line)

52 struct slab {  struct
62 struct slab *next; argument
96 static_assert(offsetof(struct page, pg) == offsetof(struct slab, sl)) argument
106 static_assert(sizeof(struct slab) <= sizeof(struct page));
108 static_assert(IS_ALIGNED(offsetof(struct slab, freelist), sizeof(freelist_aba_t)));
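
Lines 52-108 above show struct slab together with its compile-time layout checks: the type is an overlay of struct page, line 96 is the body of the field-matching macro (SLAB_MATCH(pg, sl) in current kernel trees, so pg and sl are macro parameters rather than real members), and lines 106-108 bound the overlay's size and the alignment of its freelist. Below is a minimal, self-contained userspace sketch of the same offset/size-matching technique; pg_stand_in and sl_stand_in are invented stand-ins, not kernel types.

	/* Sketch of the compile-time layout checks above (C11). */
	#include <assert.h>
	#include <stddef.h>

	struct pg_stand_in {			/* plays the role of struct page */
		unsigned long flags;
		unsigned long compound_head;
	};

	struct sl_stand_in {			/* plays the role of struct slab */
		unsigned long __page_flags;
		unsigned long slab_cache;
	};

	/* Same idea as the field-matching macro: the overlay is only safe if
	 * corresponding members sit at identical offsets... */
	#define MATCH(pg, sl)						\
		static_assert(offsetof(struct pg_stand_in, pg) ==	\
			      offsetof(struct sl_stand_in, sl), "offset mismatch")

	MATCH(flags, __page_flags);
	MATCH(compound_head, slab_cache);

	/* ...and no larger than the structure whose storage it reuses. */
	static_assert(sizeof(struct sl_stand_in) <= sizeof(struct pg_stand_in),
		      "overlay bigger than the struct it reuses");

	int main(void) { return 0; }
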
121 const struct folio *: (const struct slab *)(folio), \
122 struct folio *: (struct slab *)(folio)))
136 const struct slab *: (const struct folio *)s, \
137 struct slab *: (struct folio *)s))
152 const struct page *: (const struct slab *)(p), \
153 struct page *: (struct slab *)(p)))
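
Lines 121-153 are the _Generic branches of the folio_slab(), slab_folio() and page_slab() conversion macros (the #define lines themselves are not in the listing). The pattern is a compile-time type switch that reinterprets one view of the memory as the other while preserving constness, and rejects any argument that is not one of the expected pointer types. A runnable userspace sketch of the same pattern, with invented stand-in types:

	/* Sketch of a const-preserving pointer conversion via _Generic (C11).
	 * folio_like/slab_like are invented stand-ins, not kernel types. */
	#include <stdio.h>

	struct folio_like { unsigned long flags; };
	struct slab_like  { unsigned long __page_flags; };

	/* A const argument yields a const result; anything that is not a
	 * folio_like pointer matches no association and will not compile. */
	#define folio_like_to_slab(folio) (_Generic((folio),			\
		const struct folio_like *: (const struct slab_like *)(folio),	\
		struct folio_like *:       (struct slab_like *)(folio)))

	int main(void)
	{
		struct folio_like f = { .flags = 0 };
		const struct folio_like *cf = &f;

		struct slab_like *s        = folio_like_to_slab(&f); /* mutable in, mutable out */
		const struct slab_like *cs = folio_like_to_slab(cf); /* const in, const out */

		printf("slab view of the folio at %p\n", (void *)s);
		(void)cs;
		return 0;
	}

The conversion is free at run time; _Generic only decides, at compile time, which cast expression is used.
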
169 static inline bool slab_test_pfmemalloc(const struct slab *slab) in slab_test_pfmemalloc() argument
171 return folio_test_active(slab_folio(slab)); in slab_test_pfmemalloc()
174 static inline void slab_set_pfmemalloc(struct slab *slab) in slab_set_pfmemalloc() argument
176 folio_set_active(slab_folio(slab)); in slab_set_pfmemalloc()
179 static inline void slab_clear_pfmemalloc(struct slab *slab) in slab_clear_pfmemalloc() argument
181 folio_clear_active(slab_folio(slab)); in slab_clear_pfmemalloc()
184 static inline void __slab_clear_pfmemalloc(struct slab *slab) in __slab_clear_pfmemalloc() argument
186 __folio_clear_active(slab_folio(slab)); in __slab_clear_pfmemalloc()
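
Lines 169-186 show that a slab's pfmemalloc state has no field of its own: each helper delegates to the active-flag helper on slab_folio(slab), reusing a folio flag bit that a slab folio does not otherwise need. A small runnable sketch of that naming/delegation pattern, with invented names and an arbitrary bit value:

	/* Sketch: an existing flag bit re-exported behind purpose-named wrappers. */
	#include <stdbool.h>
	#include <stdio.h>

	struct folio_like { unsigned long flags; };

	#define FL_ACTIVE (1UL << 6)	/* the bit being reused; value is arbitrary here */

	static inline bool folio_like_test_active(const struct folio_like *f)
	{
		return f->flags & FL_ACTIVE;
	}
	static inline void folio_like_set_active(struct folio_like *f)
	{
		f->flags |= FL_ACTIVE;
	}
	static inline void folio_like_clear_active(struct folio_like *f)
	{
		f->flags &= ~FL_ACTIVE;
	}

	/* The wrappers only rename the operation, as slab_*_pfmemalloc() does above. */
	static inline bool slab_like_test_pfmemalloc(const struct folio_like *f)
	{
		return folio_like_test_active(f);
	}
	static inline void slab_like_set_pfmemalloc(struct folio_like *f)
	{
		folio_like_set_active(f);
	}
	static inline void slab_like_clear_pfmemalloc(struct folio_like *f)
	{
		folio_like_clear_active(f);
	}

	int main(void)
	{
		struct folio_like f = { .flags = 0 };

		slab_like_set_pfmemalloc(&f);
		printf("pfmemalloc=%d\n", slab_like_test_pfmemalloc(&f));
		slab_like_clear_pfmemalloc(&f);
		printf("pfmemalloc=%d\n", slab_like_test_pfmemalloc(&f));
		return 0;
	}
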
189 static inline void *slab_address(const struct slab *slab) in slab_address() argument
191 return folio_address(slab_folio(slab)); in slab_address()
194 static inline int slab_nid(const struct slab *slab) in slab_nid() argument
196 return folio_nid(slab_folio(slab)); in slab_nid()
199 static inline pg_data_t *slab_pgdat(const struct slab *slab) in slab_pgdat() argument
201 return folio_pgdat(slab_folio(slab)); in slab_pgdat()
204 static inline struct slab *virt_to_slab(const void *addr) in virt_to_slab()
214 static inline int slab_order(const struct slab *slab) in slab_order() argument
216 return folio_order(slab_folio(slab)); in slab_order()
219 static inline size_t slab_size(const struct slab *slab) in slab_size() argument
221 return PAGE_SIZE << slab_order(slab); in slab_size()
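
Lines 189-221 are one-line wrappers that answer questions about a slab (base address, NUMA node, node pg_data_t, page order, total size) by delegating to the folio helpers via slab_folio(slab); virt_to_slab() at line 204, whose body is not in the listing, resolves an arbitrary address to its folio and in current kernels returns NULL when that folio is not a slab. The only arithmetic here is slab_size() at line 221; a trivially runnable illustration of PAGE_SIZE << order, with a 4096-byte page size assumed purely for the example:

	/* Sketch: total slab size is the base page size shifted by the folio order. */
	#include <stdio.h>

	#define PAGE_SIZE_ASSUMED 4096UL	/* illustrative value, not a kernel constant */

	int main(void)
	{
		for (unsigned int order = 0; order <= 3; order++)
			printf("order %u -> slab size %lu bytes\n",
			       order, PAGE_SIZE_ASSUMED << order);
		return 0;
	}
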
324 const struct slab *slab, void *x) in nearest_obj() argument
326 void *object = x - (x - slab_address(slab)) % cache->size; in nearest_obj()
327 void *last_object = slab_address(slab) + in nearest_obj()
328 (slab->objects - 1) * cache->size; in nearest_obj()
344 const struct slab *slab, void *obj) in obj_to_index() argument
348 return __obj_to_index(cache, slab_address(slab), obj); in obj_to_index()
352 const struct slab *slab) in objs_per_slab() argument
354 return slab->objects; in objs_per_slab()
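
Lines 324-354 are the object-geometry helpers: nearest_obj() rounds an arbitrary pointer inside a slab down to the start of the object containing it (and, in the full definition, clamps to the last object), obj_to_index() converts an object pointer to its index via __obj_to_index(), and objs_per_slab() simply reads the count cached in the slab. A runnable userspace sketch of the same arithmetic, with invented names and a malloc'd buffer standing in for the slab's pages:

	/* Sketch of the object-boundary arithmetic above; cache_like is invented. */
	#include <stdio.h>
	#include <stdlib.h>

	struct cache_like {
		size_t size;		/* stride between objects, metadata included */
	};

	/* Round x down to the start of its object, clamped to the last object,
	 * mirroring lines 326-328 of the listing. */
	static void *nearest_obj_like(const struct cache_like *cache, char *slab_base,
				      unsigned int objects, char *x)
	{
		char *object = x - (x - slab_base) % cache->size;
		char *last_object = slab_base + (objects - 1) * cache->size;

		return object > last_object ? last_object : object;
	}

	/* Object pointer -> index; the kernel's __obj_to_index() does the same
	 * division with a precomputed reciprocal. */
	static unsigned int obj_to_index_like(const struct cache_like *cache,
					      char *slab_base, void *obj)
	{
		return ((char *)obj - slab_base) / cache->size;
	}

	int main(void)
	{
		struct cache_like cache = { .size = 64 };
		unsigned int objects = 8;
		char *base = malloc(objects * cache.size);

		char *inside = base + 3 * cache.size + 17;	/* somewhere inside object 3 */
		void *obj = nearest_obj_like(&cache, base, objects, inside);

		printf("object starts at offset %td, index %u\n",
		       (char *)obj - base, obj_to_index_like(&cache, base, obj));
		free(base);
		return 0;
	}
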
565 static inline struct slabobj_ext *slab_obj_exts(struct slab *slab) in slab_obj_exts() argument
567 unsigned long obj_exts = READ_ONCE(slab->obj_exts); in slab_obj_exts()
571 slab_page(slab)); in slab_obj_exts()
572 VM_BUG_ON_PAGE(obj_exts & MEMCG_DATA_KMEM, slab_page(slab)); in slab_obj_exts()
577 int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
582 static inline struct slabobj_ext *slab_obj_exts(struct slab *slab) in slab_obj_exts() argument
598 void __memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
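
Lines 565-598 belong to the per-object extension machinery: slab->obj_exts is a single word holding the pointer to the slabobj_ext vector with flag bits packed into it, which is why slab_obj_exts() loads it once with READ_ONCE(), sanity-checks the flag bits, and (in the part of the body not listed) masks them off before using the pointer; alloc_slab_obj_exts() attaches such a vector to a slab, and __memcg_slab_free_hook() is the memcg side of the free path that consumes it. A runnable userspace sketch of the tagged-pointer idiom, with invented names and a C11 relaxed atomic load standing in for READ_ONCE():

	/* Sketch: a pointer and its flags packed into one word, masked off on read.
	 * Names and flag values are invented; the kernel field is an unsigned long. */
	#include <stdatomic.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct objext_like { unsigned long bytes_charged; };

	#define EXTS_FLAG_PRESENT ((uintptr_t)0x1)	/* low bits are free because the */
	#define EXTS_FLAGS_MASK   ((uintptr_t)0x3)	/* vector is at least 4-byte aligned */

	struct slab_like {
		_Atomic uintptr_t obj_exts;		/* pointer | flags */
	};

	static struct objext_like *slab_like_obj_exts(struct slab_like *slab)
	{
		/* Single tear-free load (READ_ONCE() in the kernel), then strip
		 * the flag bits to recover the vector pointer. */
		uintptr_t v = atomic_load_explicit(&slab->obj_exts, memory_order_relaxed);

		return (struct objext_like *)(v & ~EXTS_FLAGS_MASK);
	}

	int main(void)
	{
		struct slab_like slab;
		struct objext_like *vec = calloc(4, sizeof(*vec));

		vec[2].bytes_charged = 128;
		atomic_store_explicit(&slab.obj_exts,
				      (uintptr_t)vec | EXTS_FLAG_PRESENT,
				      memory_order_relaxed);

		printf("object 2 charged %lu bytes\n",
		       slab_like_obj_exts(&slab)[2].bytes_charged);
		free(vec);
		return 0;
	}
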
684 struct slab *kp_slab;
692 void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab);
696 const struct slab *slab, bool to_user);
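
The last lines reference the debugging and hardening side of the allocator: kp_slab at line 684 is a field of struct kmem_obj_info, which __kmem_obj_info() (line 692) fills in to describe an object and its slab for kmem_dump_obj()-style reporting, and line 696 is the tail of the __check_heap_object() prototype, the slab allocator's part of the hardened usercopy check that a copy of n bytes to or from user space stays within a single object. A runnable userspace sketch of that containment idea, with invented names; the real check also deals with red zones, kfence objects and per-cache usercopy regions:

	/* Sketch: refuse a span that is not fully contained in one slab object. */
	#include <stdbool.h>
	#include <stddef.h>
	#include <stdio.h>

	struct cache_like {
		size_t object_size;	/* usable bytes per object */
		size_t size;		/* stride between objects */
	};

	static bool span_in_one_object(const struct cache_like *c,
				       const char *slab_base, const char *ptr, size_t n)
	{
		size_t in_obj = (size_t)(ptr - slab_base) % c->size;

		return in_obj + n <= c->object_size;	/* no spill past the usable area */
	}

	int main(void)
	{
		struct cache_like c = { .object_size = 56, .size = 64 };
		char slab[8 * 64];

		printf("ok copy:  %d\n", span_in_one_object(&c, slab, slab + 64 + 8, 32));
		printf("bad copy: %d\n", span_in_one_object(&c, slab, slab + 64 + 40, 32));
		return 0;
	}
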