/* SPDX-License-Identifier: GPL-2.0 */
#ifndef MM_SLAB_H
#define MM_SLAB_H

#include <linux/reciprocal_div.h>
#include <linux/list_lru.h>
#include <linux/local_lock.h>
#include <linux/random.h>
#include <linux/kobject.h>
#include <linux/sched/mm.h>
#include <linux/memcontrol.h>
#include <linux/kfence.h>
#include <linux/kasan.h>

/*
 * Internal slab definitions
 */

#ifdef CONFIG_64BIT
# ifdef system_has_cmpxchg128
# define system_has_freelist_aba()	system_has_cmpxchg128()
# define try_cmpxchg_freelist		try_cmpxchg128
# endif
#define this_cpu_try_cmpxchg_freelist	this_cpu_try_cmpxchg128
typedef u128 freelist_full_t;
#else /* CONFIG_64BIT */
# ifdef system_has_cmpxchg64
# define system_has_freelist_aba()	system_has_cmpxchg64()
# define try_cmpxchg_freelist		try_cmpxchg64
# endif
#define this_cpu_try_cmpxchg_freelist	this_cpu_try_cmpxchg64
typedef u64 freelist_full_t;
#endif /* CONFIG_64BIT */

#if defined(system_has_freelist_aba) && !defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
#undef system_has_freelist_aba
#endif

/*
 * Freelist pointer and counter to cmpxchg together, avoids the typical ABA
 * problems with cmpxchg of just a pointer.
 */
typedef union {
	struct {
		void *freelist;
		unsigned long counter;
	};
	freelist_full_t full;
} freelist_aba_t;
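
/*
 * Illustrative sketch only (the real users live in mm/slub.c): updating the
 * union as one wide cmpxchg makes the pointer and the counter change
 * together, so a freed-and-reallocated object cannot be mistaken for the
 * original freelist head. A hypothetical update following that pattern,
 * where "next_object" stands for the new freelist head and the real code
 * pairs the pointer with slab->counters or a per-cpu transaction id:
 *
 *	freelist_aba_t old, new;
 *
 *	old.freelist = slab->freelist;
 *	old.counter  = slab->counters;
 *	new.freelist = next_object;
 *	new.counter  = old.counter + 1;
 *	if (!try_cmpxchg_freelist(&slab->freelist_counter.full,
 *				  &old.full, new.full))
 *		goto retry;
 *
 * This path is only available when system_has_freelist_aba() is defined.
 */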

/* Reuses the bits in struct page */
struct slab {
	unsigned long __page_flags;

	struct kmem_cache *slab_cache;
	union {
		struct {
			union {
				struct list_head slab_list;
#ifdef CONFIG_SLUB_CPU_PARTIAL
				struct {
					struct slab *next;
					int slabs;	/* Nr of slabs left */
				};
#endif
			};
			/* Double-word boundary */
			union {
				struct {
					void *freelist;		/* first free object */
					union {
						unsigned long counters;
						struct {
							unsigned inuse:16;
							unsigned objects:15;
							unsigned frozen:1;
						};
					};
				};
#ifdef system_has_freelist_aba
				freelist_aba_t freelist_counter;
#endif
			};
		};
		struct rcu_head rcu_head;
	};

	unsigned int __page_type;
	atomic_t __page_refcount;
#ifdef CONFIG_SLAB_OBJ_EXT
	unsigned long obj_exts;
#endif
};

#define SLAB_MATCH(pg, sl)						\
	static_assert(offsetof(struct page, pg) == offsetof(struct slab, sl))
SLAB_MATCH(flags, __page_flags);
SLAB_MATCH(compound_head, slab_cache);	/* Ensure bit 0 is clear */
SLAB_MATCH(_refcount, __page_refcount);
#ifdef CONFIG_MEMCG
SLAB_MATCH(memcg_data, obj_exts);
#elif defined(CONFIG_SLAB_OBJ_EXT)
SLAB_MATCH(_unused_slab_obj_exts, obj_exts);
#endif
#undef SLAB_MATCH
static_assert(sizeof(struct slab) <= sizeof(struct page));
#if defined(system_has_freelist_aba)
static_assert(IS_ALIGNED(offsetof(struct slab, freelist), sizeof(freelist_aba_t)));
#endif

/**
 * folio_slab - Converts from folio to slab.
 * @folio: The folio.
 *
 * Currently struct slab is a different representation of a folio where
 * folio_test_slab() is true.
 *
 * Return: The slab which contains this folio.
 */
#define folio_slab(folio)	(_Generic((folio),			\
	const struct folio *:	(const struct slab *)(folio),		\
	struct folio *:		(struct slab *)(folio)))
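
/*
 * Illustrative only: the usual conversion pattern, mirrored by
 * virt_to_slab() further down in this file:
 *
 *	struct folio *folio = virt_to_folio(addr);
 *
 *	if (folio_test_slab(folio))
 *		slab = folio_slab(folio);
 */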

/**
 * slab_folio - The folio allocated for a slab
 * @slab: The slab.
 *
 * Slabs are allocated as folios that contain the individual objects and are
 * using some fields in the first struct page of the folio - those fields are
 * now accessed by struct slab. It is occasionally necessary to convert back to
 * a folio in order to communicate with the rest of the mm.  Please use this
 * helper function instead of casting yourself, as the implementation may change
 * in the future.
 */
#define slab_folio(s)		(_Generic((s),				\
	const struct slab *:	(const struct folio *)s,		\
	struct slab *:		(struct folio *)s))

/**
 * page_slab - Converts from first struct page to slab.
 * @p: The first (either head of compound or single) page of slab.
 *
 * A temporary wrapper to convert struct page to struct slab in situations where
 * we know the page is the compound head, or single order-0 page.
 *
 * Long-term ideally everything would work with struct slab directly or go
 * through folio to struct slab.
 *
 * Return: The slab which contains this page
 */
#define page_slab(p)		(_Generic((p),				\
	const struct page *:	(const struct slab *)(p),		\
	struct page *:		(struct slab *)(p)))

/**
 * slab_page - The first struct page allocated for a slab
 * @slab: The slab.
 *
 * A convenience wrapper for converting slab to the first struct page of the
 * underlying folio, to communicate with code not yet converted to folio or
 * struct slab.
 */
#define slab_page(s) folio_page(slab_folio(s), 0)

/*
 * If network-based swap is enabled, sl*b must keep track of whether pages
 * were allocated from pfmemalloc reserves.
 */
static inline bool slab_test_pfmemalloc(const struct slab *slab)
{
	return folio_test_active(slab_folio(slab));
}

static inline void slab_set_pfmemalloc(struct slab *slab)
{
	folio_set_active(slab_folio(slab));
}

static inline void slab_clear_pfmemalloc(struct slab *slab)
{
	folio_clear_active(slab_folio(slab));
}

static inline void __slab_clear_pfmemalloc(struct slab *slab)
{
	__folio_clear_active(slab_folio(slab));
}

static inline void *slab_address(const struct slab *slab)
{
	return folio_address(slab_folio(slab));
}

static inline int slab_nid(const struct slab *slab)
{
	return folio_nid(slab_folio(slab));
}

static inline pg_data_t *slab_pgdat(const struct slab *slab)
{
	return folio_pgdat(slab_folio(slab));
}

static inline struct slab *virt_to_slab(const void *addr)
{
	struct folio *folio = virt_to_folio(addr);

	if (!folio_test_slab(folio))
		return NULL;

	return folio_slab(folio);
}

static inline int slab_order(const struct slab *slab)
{
	return folio_order(slab_folio(slab));
}

static inline size_t slab_size(const struct slab *slab)
{
	return PAGE_SIZE << slab_order(slab);
}
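
/*
 * Example (illustrative): with 4 KiB pages, an order-1 slab returns 1 from
 * slab_order() and 8192 from slab_size().
 */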

#ifdef CONFIG_SLUB_CPU_PARTIAL
#define slub_percpu_partial(c)			((c)->partial)

#define slub_set_percpu_partial(c, p)		\
({						\
	slub_percpu_partial(c) = (p)->next;	\
})

#define slub_percpu_partial_read_once(c)	READ_ONCE(slub_percpu_partial(c))
#else
#define slub_percpu_partial(c)			NULL

#define slub_set_percpu_partial(c, p)

#define slub_percpu_partial_read_once(c)	NULL
#endif // CONFIG_SLUB_CPU_PARTIAL

/*
 * Word size structure that can be atomically updated or read and that
 * contains both the order and the number of objects that a slab of the
 * given order would contain.
 */
struct kmem_cache_order_objects {
	unsigned int x;
};
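
/*
 * Sketch of the encoding (the exact layout is private to mm/slub.c, see
 * oo_make(), oo_order() and oo_objects()): the low bits of x hold the object
 * count and the bits above them hold the page order, so both values can be
 * read or replaced with a single word-sized access.
 */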

/*
 * Slab cache management.
 */
struct kmem_cache {
#ifndef CONFIG_SLUB_TINY
	struct kmem_cache_cpu __percpu *cpu_slab;
#endif
	/* Used for retrieving partial slabs, etc. */
	slab_flags_t flags;
	unsigned long min_partial;
	unsigned int size;		/* Object size including metadata */
	unsigned int object_size;	/* Object size without metadata */
	struct reciprocal_value reciprocal_size;
	unsigned int offset;		/* Free pointer offset */
#ifdef CONFIG_SLUB_CPU_PARTIAL
	/* Number of per cpu partial objects to keep around */
	unsigned int cpu_partial;
	/* Number of per cpu partial slabs to keep around */
	unsigned int cpu_partial_slabs;
#endif
	struct kmem_cache_order_objects oo;

	/* Allocation and freeing of slabs */
	struct kmem_cache_order_objects min;
	gfp_t allocflags;		/* gfp flags to use on each alloc */
	int refcount;			/* Refcount for slab cache destroy */
	void (*ctor)(void *object);	/* Object constructor */
	unsigned int inuse;		/* Offset to metadata */
	unsigned int align;		/* Alignment */
	unsigned int red_left_pad;	/* Left redzone padding size */
	const char *name;		/* Name (only for display!) */
	struct list_head list;		/* List of slab caches */
#ifdef CONFIG_SYSFS
	struct kobject kobj;		/* For sysfs */
#endif
#ifdef CONFIG_SLAB_FREELIST_HARDENED
	unsigned long random;
#endif

#ifdef CONFIG_NUMA
	/*
	 * Defragmentation by allocating from a remote node.
	 */
	unsigned int remote_node_defrag_ratio;
#endif

#ifdef CONFIG_SLAB_FREELIST_RANDOM
	unsigned int *random_seq;
#endif

#ifdef CONFIG_KASAN_GENERIC
	struct kasan_cache kasan_info;
#endif

#ifdef CONFIG_HARDENED_USERCOPY
	unsigned int useroffset;	/* Usercopy region offset */
	unsigned int usersize;		/* Usercopy region size */
#endif

	struct kmem_cache_node *node[MAX_NUMNODES];
};

#if defined(CONFIG_SYSFS) && !defined(CONFIG_SLUB_TINY)
#define SLAB_SUPPORTS_SYSFS 1
void sysfs_slab_unlink(struct kmem_cache *s);
void sysfs_slab_release(struct kmem_cache *s);
#else
static inline void sysfs_slab_unlink(struct kmem_cache *s) { }
static inline void sysfs_slab_release(struct kmem_cache *s) { }
#endif

void *fixup_red_left(struct kmem_cache *s, void *p);

static inline void *nearest_obj(struct kmem_cache *cache,
				const struct slab *slab, void *x)
{
	void *object = x - (x - slab_address(slab)) % cache->size;
	void *last_object = slab_address(slab) +
		(slab->objects - 1) * cache->size;
	void *result = (unlikely(object > last_object)) ? last_object : object;

	result = fixup_red_left(cache, result);
	return result;
}

/* Determine object index from a given position */
static inline unsigned int __obj_to_index(const struct kmem_cache *cache,
					  void *addr, void *obj)
{
	return reciprocal_divide(kasan_reset_tag(obj) - addr,
				 cache->reciprocal_size);
}

static inline unsigned int obj_to_index(const struct kmem_cache *cache,
					const struct slab *slab, void *obj)
{
	if (is_kfence_address(obj))
		return 0;
	return __obj_to_index(cache, slab_address(slab), obj);
}
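
/*
 * Worked example (illustrative): for a cache with cache->size == 256, an
 * object 512 bytes past slab_address(slab) has index 512 / 256 == 2;
 * reciprocal_divide() just performs that division without a hardware divide.
 */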

static inline int objs_per_slab(const struct kmem_cache *cache,
				const struct slab *slab)
{
	return slab->objects;
}

/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */
enum slab_state {
	DOWN,			/* No slab functionality yet */
	PARTIAL,		/* SLUB: kmem_cache_node available */
	UP,			/* Slab caches usable but not all extras yet */
	FULL			/* Everything is working */
};

extern enum slab_state slab_state;

/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;

/* A table of kmalloc cache names and sizes */
extern const struct kmalloc_info_struct {
	const char *name[NR_KMALLOC_TYPES];
	unsigned int size;
} kmalloc_info[];

/* Kmalloc array related functions */
void setup_kmalloc_cache_index_table(void);
void create_kmalloc_caches(void);

extern u8 kmalloc_size_index[24];

static inline unsigned int size_index_elem(unsigned int bytes)
{
	return (bytes - 1) / 8;
}

/*
 * Find the kmem_cache structure that serves a given size of
 * allocation
 *
 * This assumes size is larger than zero and not larger than
 * KMALLOC_MAX_CACHE_SIZE and the caller must check that.
 */
static inline struct kmem_cache *
kmalloc_slab(size_t size, kmem_buckets *b, gfp_t flags, unsigned long caller)
{
	unsigned int index;

	if (!b)
		b = &kmalloc_caches[kmalloc_type(flags, caller)];
	if (size <= 192)
		index = kmalloc_size_index[size_index_elem(size)];
	else
		index = fls(size - 1);

	return (*b)[index];
}
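
/*
 * Example (illustrative, assuming the standard kmalloc size table): a
 * 100-byte GFP_KERNEL request maps through kmalloc_size_index[] to the
 * kmalloc-128 cache, while a 1000-byte request uses fls(999) == 10, i.e.
 * kmalloc-1k:
 *
 *	struct kmem_cache *s = kmalloc_slab(100, NULL, GFP_KERNEL, _RET_IP_);
 */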

gfp_t kmalloc_fix_flags(gfp_t flags);

/* Functions provided by the slab allocators */
int do_kmem_cache_create(struct kmem_cache *s, const char *name,
			 unsigned int size, struct kmem_cache_args *args,
			 slab_flags_t flags);

void __init kmem_cache_init(void);
extern void create_boot_cache(struct kmem_cache *, const char *name,
			unsigned int size, slab_flags_t flags,
			unsigned int useroffset, unsigned int usersize);

int slab_unmergeable(struct kmem_cache *s);
struct kmem_cache *find_mergeable(unsigned size, unsigned align,
		slab_flags_t flags, const char *name, void (*ctor)(void *));
struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
		   slab_flags_t flags, void (*ctor)(void *));

slab_flags_t kmem_cache_flags(slab_flags_t flags, const char *name);

static inline bool is_kmalloc_cache(struct kmem_cache *s)
{
	return (s->flags & SLAB_KMALLOC);
}

static inline bool is_kmalloc_normal(struct kmem_cache *s)
{
	if (!is_kmalloc_cache(s))
		return false;
	return !(s->flags & (SLAB_CACHE_DMA|SLAB_ACCOUNT|SLAB_RECLAIM_ACCOUNT));
}

/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \
			 SLAB_CACHE_DMA32 | SLAB_PANIC | \
			 SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS)

#ifdef CONFIG_SLUB_DEBUG
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
			  SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif

#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
			  SLAB_TEMPORARY | SLAB_ACCOUNT | \
			  SLAB_NO_USER_FLAGS | SLAB_KMALLOC | SLAB_NO_MERGE)

/* Common flags available with current configuration */
#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)

/* Common flags permitted for kmem_cache_create */
#define SLAB_FLAGS_PERMITTED (SLAB_CORE_FLAGS | \
			      SLAB_RED_ZONE | \
			      SLAB_POISON | \
			      SLAB_STORE_USER | \
			      SLAB_TRACE | \
			      SLAB_CONSISTENCY_CHECKS | \
			      SLAB_NOLEAKTRACE | \
			      SLAB_RECLAIM_ACCOUNT | \
			      SLAB_TEMPORARY | \
			      SLAB_ACCOUNT | \
			      SLAB_KMALLOC | \
			      SLAB_NO_MERGE | \
			      SLAB_NO_USER_FLAGS)

bool __kmem_cache_empty(struct kmem_cache *);
int __kmem_cache_shutdown(struct kmem_cache *);
void __kmem_cache_release(struct kmem_cache *);
int __kmem_cache_shrink(struct kmem_cache *);
void slab_kmem_cache_release(struct kmem_cache *);

struct seq_file;
struct file;

struct slabinfo {
	unsigned long active_objs;
	unsigned long num_objs;
	unsigned long active_slabs;
	unsigned long num_slabs;
	unsigned long shared_avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int shared;
	unsigned int objects_per_slab;
	unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);

#ifdef CONFIG_SLUB_DEBUG
#ifdef CONFIG_SLUB_DEBUG_ON
DECLARE_STATIC_KEY_TRUE(slub_debug_enabled);
#else
DECLARE_STATIC_KEY_FALSE(slub_debug_enabled);
#endif
extern void print_tracking(struct kmem_cache *s, void *object);
long validate_slab_cache(struct kmem_cache *s);
static inline bool __slub_debug_enabled(void)
{
	return static_branch_unlikely(&slub_debug_enabled);
}
#else
static inline void print_tracking(struct kmem_cache *s, void *object)
{
}
static inline bool __slub_debug_enabled(void)
{
	return false;
}
#endif

/*
 * Returns true if any of the specified slab_debug flags is enabled for the
 * cache. Use only for flags parsed by setup_slub_debug() as it also enables
 * the static key.
 */
static inline bool kmem_cache_debug_flags(struct kmem_cache *s, slab_flags_t flags)
{
	if (IS_ENABLED(CONFIG_SLUB_DEBUG))
		VM_WARN_ON_ONCE(!(flags & SLAB_DEBUG_FLAGS));
	if (__slub_debug_enabled())
		return s->flags & flags;
	return false;
}
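
/*
 * Typical use (illustrative): gate a debug-only path on one specific flag,
 * e.g. kmem_cache_debug_flags(s, SLAB_STORE_USER), so the static key keeps
 * the check out of the fast path when slab_debug is not enabled.
 */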

#if IS_ENABLED(CONFIG_SLUB_DEBUG) && IS_ENABLED(CONFIG_KUNIT)
bool slab_in_kunit_test(void);
#else
static inline bool slab_in_kunit_test(void) { return false; }
#endif

#ifdef CONFIG_SLAB_OBJ_EXT

/*
 * slab_obj_exts - get the pointer to the slab object extension vector
 * associated with a slab.
 * @slab: a pointer to the slab struct
 *
 * Returns a pointer to the object extension vector associated with the slab,
 * or NULL if no such vector has been associated yet.
 */
static inline struct slabobj_ext *slab_obj_exts(struct slab *slab)
{
	unsigned long obj_exts = READ_ONCE(slab->obj_exts);

#ifdef CONFIG_MEMCG
	VM_BUG_ON_PAGE(obj_exts && !(obj_exts & MEMCG_DATA_OBJEXTS),
							slab_page(slab));
	VM_BUG_ON_PAGE(obj_exts & MEMCG_DATA_KMEM, slab_page(slab));
#endif
	return (struct slabobj_ext *)(obj_exts & ~OBJEXTS_FLAGS_MASK);
}
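
/*
 * Illustrative: the vector has one entry per object in the slab, so callers
 * index it with obj_to_index(), roughly
 *
 *	struct slabobj_ext *ext = &slab_obj_exts(slab)[obj_to_index(s, slab, p)];
 *
 * as the memcg and allocation-tagging hooks do.
 */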

int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
                        gfp_t gfp, bool new_slab);

#else /* CONFIG_SLAB_OBJ_EXT */

static inline struct slabobj_ext *slab_obj_exts(struct slab *slab)
{
	return NULL;
}

#endif /* CONFIG_SLAB_OBJ_EXT */

static inline enum node_stat_item cache_vmstat_idx(struct kmem_cache *s)
{
	return (s->flags & SLAB_RECLAIM_ACCOUNT) ?
		NR_SLAB_RECLAIMABLE_B : NR_SLAB_UNRECLAIMABLE_B;
}

#ifdef CONFIG_MEMCG
bool __memcg_slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru,
				  gfp_t flags, size_t size, void **p);
void __memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
			    void **p, int objects, struct slabobj_ext *obj_exts);
#endif

size_t __ksize(const void *objp);

static inline size_t slab_ksize(const struct kmem_cache *s)
{
#ifdef CONFIG_SLUB_DEBUG
	/*
	 * Debugging requires use of the padding between object
	 * and whatever may come after it.
	 */
	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
		return s->object_size;
#endif
	if (s->flags & SLAB_KASAN)
		return s->object_size;
	/*
	 * If we have the need to store the freelist pointer
	 * back there or track user information then we can
	 * only use the space before that information.
	 */
	if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
		return s->inuse;
	/*
	 * Else we can use all the padding etc for the allocation
	 */
	return s->size;
}

#ifdef CONFIG_SLUB_DEBUG
void dump_unreclaimable_slab(void);
#else
static inline void dump_unreclaimable_slab(void)
{
}
#endif

void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);

#ifdef CONFIG_SLAB_FREELIST_RANDOM
int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
			gfp_t gfp);
void cache_random_seq_destroy(struct kmem_cache *cachep);
#else
static inline int cache_random_seq_create(struct kmem_cache *cachep,
					unsigned int count, gfp_t gfp)
{
	return 0;
}
static inline void cache_random_seq_destroy(struct kmem_cache *cachep) { }
#endif /* CONFIG_SLAB_FREELIST_RANDOM */

static inline bool slab_want_init_on_alloc(gfp_t flags, struct kmem_cache *c)
{
	if (static_branch_maybe(CONFIG_INIT_ON_ALLOC_DEFAULT_ON,
				&init_on_alloc)) {
		if (c->ctor)
			return false;
		if (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON))
			return flags & __GFP_ZERO;
		return true;
	}
	return flags & __GFP_ZERO;
}

static inline bool slab_want_init_on_free(struct kmem_cache *c)
{
	if (static_branch_maybe(CONFIG_INIT_ON_FREE_DEFAULT_ON,
				&init_on_free))
		return !(c->ctor ||
			 (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)));
	return false;
}

#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_SLUB_DEBUG)
void debugfs_slab_release(struct kmem_cache *);
#else
static inline void debugfs_slab_release(struct kmem_cache *s) { }
#endif

#ifdef CONFIG_PRINTK
#define KS_ADDRS_COUNT 16
struct kmem_obj_info {
	void *kp_ptr;
	struct slab *kp_slab;
	void *kp_objp;
	unsigned long kp_data_offset;
	struct kmem_cache *kp_slab_cache;
	void *kp_ret;
	void *kp_stack[KS_ADDRS_COUNT];
	void *kp_free_stack[KS_ADDRS_COUNT];
};
void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab);
#endif

void __check_heap_object(const void *ptr, unsigned long n,
			 const struct slab *slab, bool to_user);

#ifdef CONFIG_SLUB_DEBUG
void skip_orig_size_check(struct kmem_cache *s, const void *object);
#endif

#endif /* MM_SLAB_H */