1  /* SPDX-License-Identifier: GPL-2.0-or-later */
2  /* memcontrol.h - Memory Controller
3   *
4   * Copyright IBM Corporation, 2007
5   * Author Balbir Singh <balbir@linux.vnet.ibm.com>
6   *
7   * Copyright 2007 OpenVZ SWsoft Inc
8   * Author: Pavel Emelianov <xemul@openvz.org>
9   */
10  
11  #ifndef _LINUX_MEMCONTROL_H
12  #define _LINUX_MEMCONTROL_H
13  #include <linux/cgroup.h>
14  #include <linux/vm_event_item.h>
15  #include <linux/hardirq.h>
16  #include <linux/jump_label.h>
17  #include <linux/kernel.h>
18  #include <linux/page_counter.h>
19  #include <linux/vmpressure.h>
20  #include <linux/eventfd.h>
21  #include <linux/mm.h>
22  #include <linux/vmstat.h>
23  #include <linux/writeback.h>
24  #include <linux/page-flags.h>
25  #include <linux/shrinker.h>
26  
27  struct mem_cgroup;
28  struct obj_cgroup;
29  struct page;
30  struct mm_struct;
31  struct kmem_cache;
32  
33  /* Cgroup-specific page state, on top of universal node page state */
34  enum memcg_stat_item {
35  	MEMCG_SWAP = NR_VM_NODE_STAT_ITEMS,
36  	MEMCG_SOCK,
37  	MEMCG_PERCPU_B,
38  	MEMCG_VMALLOC,
39  	MEMCG_KMEM,
40  	MEMCG_ZSWAP_B,
41  	MEMCG_ZSWAPPED,
42  	MEMCG_NR_STAT,
43  };
44  
45  enum memcg_memory_event {
46  	MEMCG_LOW,
47  	MEMCG_HIGH,
48  	MEMCG_MAX,
49  	MEMCG_OOM,
50  	MEMCG_OOM_KILL,
51  	MEMCG_OOM_GROUP_KILL,
52  	MEMCG_SWAP_HIGH,
53  	MEMCG_SWAP_MAX,
54  	MEMCG_SWAP_FAIL,
55  	MEMCG_NR_MEMORY_EVENTS,
56  };
57  
58  struct mem_cgroup_reclaim_cookie {
59  	pg_data_t *pgdat;
60  	int generation;
61  };
62  
63  #ifdef CONFIG_MEMCG
64  
65  #define MEM_CGROUP_ID_SHIFT	16
66  
67  struct mem_cgroup_id {
68  	int id;
69  	refcount_t ref;
70  };
71  
72  struct memcg_vmstats_percpu;
73  struct memcg1_events_percpu;
74  struct memcg_vmstats;
75  struct lruvec_stats_percpu;
76  struct lruvec_stats;
77  
78  struct mem_cgroup_reclaim_iter {
79  	struct mem_cgroup *position;
80  	/* scan generation, increased every round-trip */
81  	atomic_t generation;
82  };
83  
84  /*
85   * per-node information in memory controller.
86   */
87  struct mem_cgroup_per_node {
88  	/* Keep the read-only fields at the start */
89  	struct mem_cgroup	*memcg;		/* Back pointer, we cannot */
90  						/* use container_of	   */
91  
92  	struct lruvec_stats_percpu __percpu	*lruvec_stats_percpu;
93  	struct lruvec_stats			*lruvec_stats;
94  	struct shrinker_info __rcu	*shrinker_info;
95  
96  #ifdef CONFIG_MEMCG_V1
97  	/*
	 * Memcg-v1-only fields sit in the middle, acting as a buffer between
	 * the read-mostly fields above and the often-updated fields below to
	 * avoid false sharing. If the v1 fields are not present, explicit
	 * padding is needed instead.
101  	 */
102  
103  	struct rb_node		tree_node;	/* RB tree node */
104  	unsigned long		usage_in_excess;/* Set to the value by which */
105  						/* the soft limit is exceeded*/
106  	bool			on_tree;
107  #else
108  	CACHELINE_PADDING(_pad1_);
109  #endif
110  
111  	/* Fields which get updated often at the end. */
112  	struct lruvec		lruvec;
113  	CACHELINE_PADDING(_pad2_);
114  	unsigned long		lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];
115  	struct mem_cgroup_reclaim_iter	iter;
116  };
117  
118  struct mem_cgroup_threshold {
119  	struct eventfd_ctx *eventfd;
120  	unsigned long threshold;
121  };
122  
123  /* For threshold */
124  struct mem_cgroup_threshold_ary {
125  	/* An array index points to threshold just below or equal to usage. */
126  	int current_threshold;
127  	/* Size of entries[] */
128  	unsigned int size;
129  	/* Array of thresholds */
130  	struct mem_cgroup_threshold entries[] __counted_by(size);
131  };
132  
133  struct mem_cgroup_thresholds {
134  	/* Primary thresholds array */
135  	struct mem_cgroup_threshold_ary *primary;
136  	/*
137  	 * Spare threshold array.
138  	 * This is needed to make mem_cgroup_unregister_event() "never fail".
139  	 * It must be able to store at least primary->size - 1 entries.
140  	 */
141  	struct mem_cgroup_threshold_ary *spare;
142  };
143  
144  /*
145   * Remember four most recent foreign writebacks with dirty pages in this
146   * cgroup.  Inode sharing is expected to be uncommon and, even if we miss
147   * one in a given round, we're likely to catch it later if it keeps
148   * foreign-dirtying, so a fairly low count should be enough.
149   *
150   * See mem_cgroup_track_foreign_dirty_slowpath() for details.
151   */
152  #define MEMCG_CGWB_FRN_CNT	4
153  
154  struct memcg_cgwb_frn {
155  	u64 bdi_id;			/* bdi->id of the foreign inode */
156  	int memcg_id;			/* memcg->css.id of foreign inode */
157  	u64 at;				/* jiffies_64 at the time of dirtying */
158  	struct wb_completion done;	/* tracks in-flight foreign writebacks */
159  };
160  
161  /*
162   * Bucket for arbitrarily byte-sized objects charged to a memory
163   * cgroup. The bucket can be reparented in one piece when the cgroup
164   * is destroyed, without having to round up the individual references
165   * of all live memory objects in the wild.
166   */
167  struct obj_cgroup {
168  	struct percpu_ref refcnt;
169  	struct mem_cgroup *memcg;
170  	atomic_t nr_charged_bytes;
171  	union {
172  		struct list_head list; /* protected by objcg_lock */
173  		struct rcu_head rcu;
174  	};
175  };
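
/*
 * Usage sketch (illustrative only; "struct foo" is a hypothetical object
 * that outlives its cgroup, and only helpers declared in this header are
 * used): long-lived objects pin the objcg via its cheap percpu refcount
 * instead of pinning the memcg itself, and resolve the (possibly
 * reparented) memcg only when they need it:
 *
 *	struct foo {
 *		struct obj_cgroup *objcg;
 *	};
 *
 *	// at creation time, in a context where objcg is stable:
 *	obj_cgroup_get(objcg);
 *	foo->objcg = objcg;
 *
 *	// at accounting time:
 *	memcg = get_mem_cgroup_from_objcg(foo->objcg);
 *	... charge/account against memcg ...
 *	mem_cgroup_put(memcg);
 *
 *	// at destruction time:
 *	obj_cgroup_put(foo->objcg);
 */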
176  
177  /*
178   * The memory controller data structure. The memory controller controls both
179   * page cache and RSS per cgroup. We would eventually like to provide
180   * statistics based on the statistics developed by Rik Van Riel for clock-pro,
181   * to help the administrator determine what knobs to tune.
182   */
183  struct mem_cgroup {
184  	struct cgroup_subsys_state css;
185  
186  	/* Private memcg ID. Used to ID objects that outlive the cgroup */
187  	struct mem_cgroup_id id;
188  
189  	/* Accounted resources */
190  	struct page_counter memory;		/* Both v1 & v2 */
191  
192  	union {
193  		struct page_counter swap;	/* v2 only */
194  		struct page_counter memsw;	/* v1 only */
195  	};
196  
197  	/* registered local peak watchers */
198  	struct list_head memory_peaks;
199  	struct list_head swap_peaks;
200  	spinlock_t	 peaks_lock;
201  
202  	/* Range enforcement for interrupt charges */
203  	struct work_struct high_work;
204  
205  #ifdef CONFIG_ZSWAP
206  	unsigned long zswap_max;
207  
208  	/*
209  	 * Prevent pages from this memcg from being written back from zswap to
210  	 * swap, and from being swapped out on zswap store failures.
211  	 */
212  	bool zswap_writeback;
213  #endif
214  
215  	/* vmpressure notifications */
216  	struct vmpressure vmpressure;
217  
218  	/*
	 * Should the OOM killer kill all tasks in the cgroup instead of one?
220  	 */
221  	bool oom_group;
222  
223  	int swappiness;
224  
225  	/* memory.events and memory.events.local */
226  	struct cgroup_file events_file;
227  	struct cgroup_file events_local_file;
228  
229  	/* handle for "memory.swap.events" */
230  	struct cgroup_file swap_events_file;
231  
232  	/* memory.stat */
233  	struct memcg_vmstats	*vmstats;
234  
235  	/* memory.events */
236  	atomic_long_t		memory_events[MEMCG_NR_MEMORY_EVENTS];
237  	atomic_long_t		memory_events_local[MEMCG_NR_MEMORY_EVENTS];
238  
239  	/*
	 * Hint of reclaim pressure for socket memory management. Note
241  	 * that this indicator should NOT be used in legacy cgroup mode
242  	 * where socket memory is accounted/charged separately.
243  	 */
244  	unsigned long		socket_pressure;
245  
246  	int kmemcg_id;
247  	/*
	 * memcg->objcg is wiped out as a part of the objcg reparenting
	 * process. memcg->orig_objcg preserves a pointer (and a reference)
	 * to the original objcg until the end of the memcg's life.
251  	 */
252  	struct obj_cgroup __rcu	*objcg;
253  	struct obj_cgroup	*orig_objcg;
254  	/* list of inherited objcgs, protected by objcg_lock */
255  	struct list_head objcg_list;
256  
257  	struct memcg_vmstats_percpu __percpu *vmstats_percpu;
258  
259  #ifdef CONFIG_CGROUP_WRITEBACK
260  	struct list_head cgwb_list;
261  	struct wb_domain cgwb_domain;
262  	struct memcg_cgwb_frn cgwb_frn[MEMCG_CGWB_FRN_CNT];
263  #endif
264  
265  #ifdef CONFIG_TRANSPARENT_HUGEPAGE
266  	struct deferred_split deferred_split_queue;
267  #endif
268  
269  #ifdef CONFIG_LRU_GEN_WALKS_MMU
270  	/* per-memcg mm_struct list */
271  	struct lru_gen_mm_list mm_list;
272  #endif
273  
274  #ifdef CONFIG_MEMCG_V1
275  	/* Legacy consumer-oriented counters */
276  	struct page_counter kmem;		/* v1 only */
277  	struct page_counter tcpmem;		/* v1 only */
278  
279  	struct memcg1_events_percpu __percpu *events_percpu;
280  
281  	unsigned long soft_limit;
282  
283  	/* protected by memcg_oom_lock */
284  	bool oom_lock;
285  	int under_oom;
286  
287  	/* OOM-Killer disable */
288  	int oom_kill_disable;
289  
290  	/* protect arrays of thresholds */
291  	struct mutex thresholds_lock;
292  
293  	/* thresholds for memory usage. RCU-protected */
294  	struct mem_cgroup_thresholds thresholds;
295  
296  	/* thresholds for mem+swap usage. RCU-protected */
297  	struct mem_cgroup_thresholds memsw_thresholds;
298  
299  	/* For oom notifier event fd */
300  	struct list_head oom_notify;
301  
302  	/*
	 * Should we move charges of a task when the task is moved into this
	 * mem_cgroup? And what type of charges should we move?
305  	 */
306  	unsigned long move_charge_at_immigrate;
307  	/* taken only while moving_account > 0 */
308  	spinlock_t move_lock;
309  	unsigned long move_lock_flags;
310  
311  	/* Legacy tcp memory accounting */
312  	bool tcpmem_active;
313  	int tcpmem_pressure;
314  
315  	/*
	 * Set > 0 if pages under this cgroup are being moved to another cgroup.
317  	 */
318  	atomic_t moving_account;
319  	struct task_struct *move_lock_task;
320  
321  	/* List of events which userspace want to receive */
322  	struct list_head event_list;
323  	spinlock_t event_list_lock;
324  #endif /* CONFIG_MEMCG_V1 */
325  
326  	struct mem_cgroup_per_node *nodeinfo[];
327  };
328  
329  /*
 * Size of the first charge trial.
 * TODO: it may be necessary to use larger batches on big-iron machines, or to
 * size this dynamically based on the workload.
333   */
334  #define MEMCG_CHARGE_BATCH 64U
335  
336  extern struct mem_cgroup *root_mem_cgroup;
337  
338  enum page_memcg_data_flags {
	/* page->memcg_data is a pointer to a slabobj_ext vector */
340  	MEMCG_DATA_OBJEXTS = (1UL << 0),
341  	/* page has been accounted as a non-slab kernel page */
342  	MEMCG_DATA_KMEM = (1UL << 1),
343  	/* the next bit after the last actual flag */
344  	__NR_MEMCG_DATA_FLAGS  = (1UL << 2),
345  };
346  
347  #define __FIRST_OBJEXT_FLAG	__NR_MEMCG_DATA_FLAGS
348  
349  #else /* CONFIG_MEMCG */
350  
351  #define __FIRST_OBJEXT_FLAG	(1UL << 0)
352  
353  #endif /* CONFIG_MEMCG */
354  
355  enum objext_flags {
356  	/* slabobj_ext vector failed to allocate */
357  	OBJEXTS_ALLOC_FAIL = __FIRST_OBJEXT_FLAG,
358  	/* the next bit after the last actual flag */
359  	__NR_OBJEXTS_FLAGS  = (__FIRST_OBJEXT_FLAG << 1),
360  };
361  
362  #define OBJEXTS_FLAGS_MASK (__NR_OBJEXTS_FLAGS - 1)
363  
364  #ifdef CONFIG_MEMCG
365  
366  static inline bool folio_memcg_kmem(struct folio *folio);
367  
368  /*
 * After initialization, objcg->memcg always points at a valid memcg, but it
 * can be atomically swapped to the parent memcg.
371   *
372   * The caller must ensure that the returned memcg won't be released.
373   */
static inline struct mem_cgroup *obj_cgroup_memcg(struct obj_cgroup *objcg)
375  {
376  	lockdep_assert_once(rcu_read_lock_held() || lockdep_is_held(&cgroup_mutex));
377  	return READ_ONCE(objcg->memcg);
378  }
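
/*
 * Usage sketch of the locking contract above (illustrative only): the
 * returned memcg is only guaranteed to stay alive for as long as the RCU
 * read-side section (or cgroup_mutex) is held.
 *
 *	rcu_read_lock();
 *	memcg = obj_cgroup_memcg(objcg);
 *	... short, non-sleeping use of memcg ...
 *	rcu_read_unlock();
 *
 * See get_mem_cgroup_from_objcg() below for the variant that returns a
 * reference the caller can hold onto.
 */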
379  
380  /*
381   * __folio_memcg - Get the memory cgroup associated with a non-kmem folio
382   * @folio: Pointer to the folio.
383   *
384   * Returns a pointer to the memory cgroup associated with the folio,
385   * or NULL. This function assumes that the folio is known to have a
386   * proper memory cgroup pointer. It's not safe to call this function
 * against some types of folios, e.g. slab folios or ex-slab folios or
388   * kmem folios.
389   */
static inline struct mem_cgroup *__folio_memcg(struct folio *folio)
391  {
392  	unsigned long memcg_data = folio->memcg_data;
393  
394  	VM_BUG_ON_FOLIO(folio_test_slab(folio), folio);
395  	VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_OBJEXTS, folio);
396  	VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_KMEM, folio);
397  
398  	return (struct mem_cgroup *)(memcg_data & ~OBJEXTS_FLAGS_MASK);
399  }
400  
401  /*
402   * __folio_objcg - get the object cgroup associated with a kmem folio.
403   * @folio: Pointer to the folio.
404   *
405   * Returns a pointer to the object cgroup associated with the folio,
406   * or NULL. This function assumes that the folio is known to have a
407   * proper object cgroup pointer. It's not safe to call this function
 * against some types of folios, e.g. slab folios or ex-slab folios or
409   * LRU folios.
410   */
static inline struct obj_cgroup *__folio_objcg(struct folio *folio)
412  {
413  	unsigned long memcg_data = folio->memcg_data;
414  
415  	VM_BUG_ON_FOLIO(folio_test_slab(folio), folio);
416  	VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_OBJEXTS, folio);
417  	VM_BUG_ON_FOLIO(!(memcg_data & MEMCG_DATA_KMEM), folio);
418  
419  	return (struct obj_cgroup *)(memcg_data & ~OBJEXTS_FLAGS_MASK);
420  }
421  
422  /*
423   * folio_memcg - Get the memory cgroup associated with a folio.
424   * @folio: Pointer to the folio.
425   *
426   * Returns a pointer to the memory cgroup associated with the folio,
427   * or NULL. This function assumes that the folio is known to have a
428   * proper memory cgroup pointer. It's not safe to call this function
 * against some types of folios, e.g. slab folios or ex-slab folios.
430   *
431   * For a non-kmem folio any of the following ensures folio and memcg binding
432   * stability:
433   *
434   * - the folio lock
435   * - LRU isolation
436   * - folio_memcg_lock()
437   * - exclusive reference
438   * - mem_cgroup_trylock_pages()
439   *
440   * For a kmem folio a caller should hold an rcu read lock to protect memcg
441   * associated with a kmem folio from being released.
442   */
static inline struct mem_cgroup *folio_memcg(struct folio *folio)
444  {
445  	if (folio_memcg_kmem(folio))
446  		return obj_cgroup_memcg(__folio_objcg(folio));
447  	return __folio_memcg(folio);
448  }
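
/*
 * Usage sketch (illustrative only; assumes the caller takes the folio
 * lock, which is one of the stability conditions listed above):
 *
 *	folio_lock(folio);
 *	memcg = folio_memcg(folio);
 *	if (memcg)
 *		... the folio<->memcg binding cannot change here ...
 *	folio_unlock(folio);
 */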
449  
450  /*
451   * folio_memcg_charged - If a folio is charged to a memory cgroup.
452   * @folio: Pointer to the folio.
453   *
454   * Returns true if folio is charged to a memory cgroup, otherwise returns false.
455   */
static inline bool folio_memcg_charged(struct folio *folio)
457  {
458  	if (folio_memcg_kmem(folio))
459  		return __folio_objcg(folio) != NULL;
460  	return __folio_memcg(folio) != NULL;
461  }
462  
463  /**
464   * folio_memcg_rcu - Locklessly get the memory cgroup associated with a folio.
465   * @folio: Pointer to the folio.
466   *
467   * This function assumes that the folio is known to have a
468   * proper memory cgroup pointer. It's not safe to call this function
 * against some types of folios, e.g. slab folios or ex-slab folios.
470   *
471   * Return: A pointer to the memory cgroup associated with the folio,
472   * or NULL.
473   */
static inline struct mem_cgroup *folio_memcg_rcu(struct folio *folio)
475  {
476  	unsigned long memcg_data = READ_ONCE(folio->memcg_data);
477  
478  	VM_BUG_ON_FOLIO(folio_test_slab(folio), folio);
479  
480  	if (memcg_data & MEMCG_DATA_KMEM) {
481  		struct obj_cgroup *objcg;
482  
483  		objcg = (void *)(memcg_data & ~OBJEXTS_FLAGS_MASK);
484  		return obj_cgroup_memcg(objcg);
485  	}
486  
487  	WARN_ON_ONCE(!rcu_read_lock_held());
488  
489  	return (struct mem_cgroup *)(memcg_data & ~OBJEXTS_FLAGS_MASK);
490  }
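
/*
 * Usage sketch (illustrative only): lockless lookup for a short,
 * non-sleeping read of the folio's memcg.
 *
 *	rcu_read_lock();
 *	memcg = folio_memcg_rcu(folio);
 *	if (memcg)
 *		... read-only, non-sleeping access ...
 *	rcu_read_unlock();
 */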
491  
492  /*
493   * folio_memcg_check - Get the memory cgroup associated with a folio.
494   * @folio: Pointer to the folio.
495   *
496   * Returns a pointer to the memory cgroup associated with the folio,
 * or NULL. Unlike folio_memcg(), this function can take any folio
 * as an argument. It has to be used when it is not known whether a folio
 * has an associated memory cgroup pointer, an object cgroup vector or
 * an object cgroup.
501   *
502   * For a non-kmem folio any of the following ensures folio and memcg binding
503   * stability:
504   *
505   * - the folio lock
506   * - LRU isolation
 * - folio_memcg_lock()
508   * - exclusive reference
509   * - mem_cgroup_trylock_pages()
510   *
511   * For a kmem folio a caller should hold an rcu read lock to protect memcg
512   * associated with a kmem folio from being released.
513   */
static inline struct mem_cgroup *folio_memcg_check(struct folio *folio)
515  {
516  	/*
517  	 * Because folio->memcg_data might be changed asynchronously
518  	 * for slabs, READ_ONCE() should be used here.
519  	 */
520  	unsigned long memcg_data = READ_ONCE(folio->memcg_data);
521  
522  	if (memcg_data & MEMCG_DATA_OBJEXTS)
523  		return NULL;
524  
525  	if (memcg_data & MEMCG_DATA_KMEM) {
526  		struct obj_cgroup *objcg;
527  
528  		objcg = (void *)(memcg_data & ~OBJEXTS_FLAGS_MASK);
529  		return obj_cgroup_memcg(objcg);
530  	}
531  
532  	return (struct mem_cgroup *)(memcg_data & ~OBJEXTS_FLAGS_MASK);
533  }
534  
static inline struct mem_cgroup *page_memcg_check(struct page *page)
536  {
537  	if (PageTail(page))
538  		return NULL;
539  	return folio_memcg_check((struct folio *)page);
540  }
541  
static inline struct mem_cgroup *get_mem_cgroup_from_objcg(struct obj_cgroup *objcg)
543  {
544  	struct mem_cgroup *memcg;
545  
546  	rcu_read_lock();
547  retry:
548  	memcg = obj_cgroup_memcg(objcg);
549  	if (unlikely(!css_tryget(&memcg->css)))
550  		goto retry;
551  	rcu_read_unlock();
552  
553  	return memcg;
554  }
555  
556  /*
557   * folio_memcg_kmem - Check if the folio has the memcg_kmem flag set.
558   * @folio: Pointer to the folio.
559   *
 * Checks if the folio has the MemcgKmem flag set. The caller must ensure
561   * that the folio has an associated memory cgroup. It's not safe to call
562   * this function against some types of folios, e.g. slab folios.
563   */
static inline bool folio_memcg_kmem(struct folio *folio)
565  {
566  	VM_BUG_ON_PGFLAGS(PageTail(&folio->page), &folio->page);
567  	VM_BUG_ON_FOLIO(folio->memcg_data & MEMCG_DATA_OBJEXTS, folio);
568  	return folio->memcg_data & MEMCG_DATA_KMEM;
569  }
570  
static inline bool PageMemcgKmem(struct page *page)
572  {
573  	return folio_memcg_kmem(page_folio(page));
574  }
575  
static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
577  {
578  	return (memcg == root_mem_cgroup);
579  }
580  
static inline bool mem_cgroup_disabled(void)
582  {
583  	return !cgroup_subsys_enabled(memory_cgrp_subsys);
584  }
585  
static inline void mem_cgroup_protection(struct mem_cgroup *root,
587  					 struct mem_cgroup *memcg,
588  					 unsigned long *min,
589  					 unsigned long *low)
590  {
591  	*min = *low = 0;
592  
593  	if (mem_cgroup_disabled())
594  		return;
595  
596  	/*
	 * No reclaim protection is applied to the reclaim target itself.
	 * We special-case this here because mem_cgroup_calculate_protection()
	 * is not robust enough to keep the protection invariant of the
	 * calculated effective values for parallel reclaimers with different
	 * reclaim targets. This is especially a problem for tail memcgs (as
	 * they have pages on the LRU), which would want effective values of 0
	 * for targeted reclaim but a different value for external reclaim.
605  	 *
606  	 * Example
607  	 * Let's have global and A's reclaim in parallel:
608  	 *  |
609  	 *  A (low=2G, usage = 3G, max = 3G, children_low_usage = 1.5G)
610  	 *  |\
611  	 *  | C (low = 1G, usage = 2.5G)
612  	 *  B (low = 1G, usage = 0.5G)
613  	 *
614  	 * For the global reclaim
615  	 * A.elow = A.low
616  	 * B.elow = min(B.usage, B.low) because children_low_usage <= A.elow
617  	 * C.elow = min(C.usage, C.low)
618  	 *
	 * With the effective values reset for A's (targeted) reclaim:
620  	 * A.elow = 0
621  	 * B.elow = B.low
622  	 * C.elow = C.low
623  	 *
	 * If the global reclaim races with A's reclaim, then
	 * B.elow = C.elow = 0 (because children_low_usage > A.elow)
	 * is possible and reclaiming B would violate the protection.
627  	 *
628  	 */
629  	if (root == memcg)
630  		return;
631  
632  	*min = READ_ONCE(memcg->memory.emin);
633  	*low = READ_ONCE(memcg->memory.elow);
634  }
635  
636  void mem_cgroup_calculate_protection(struct mem_cgroup *root,
637  				     struct mem_cgroup *memcg);
638  
static inline bool mem_cgroup_unprotected(struct mem_cgroup *target,
640  					  struct mem_cgroup *memcg)
641  {
642  	/*
643  	 * The root memcg doesn't account charges, and doesn't support
644  	 * protection. The target memcg's protection is ignored, see
645  	 * mem_cgroup_calculate_protection() and mem_cgroup_protection()
646  	 */
647  	return mem_cgroup_disabled() || mem_cgroup_is_root(memcg) ||
648  		memcg == target;
649  }
650  
static inline bool mem_cgroup_below_low(struct mem_cgroup *target,
652  					struct mem_cgroup *memcg)
653  {
654  	if (mem_cgroup_unprotected(target, memcg))
655  		return false;
656  
657  	return READ_ONCE(memcg->memory.elow) >=
658  		page_counter_read(&memcg->memory);
659  }
660  
static inline bool mem_cgroup_below_min(struct mem_cgroup *target,
662  					struct mem_cgroup *memcg)
663  {
664  	if (mem_cgroup_unprotected(target, memcg))
665  		return false;
666  
667  	return READ_ONCE(memcg->memory.emin) >=
668  		page_counter_read(&memcg->memory);
669  }
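
/*
 * Sketch of how a reclaimer is expected to consume the protection helpers
 * above (loosely modelled on mm/vmscan.c; the actual control flow there
 * carries additional state such as memcg_low_reclaim):
 *
 *	mem_cgroup_calculate_protection(target_memcg, memcg);
 *
 *	if (mem_cgroup_below_min(target_memcg, memcg)) {
 *		// hard protection: skip this memcg entirely
 *	} else if (mem_cgroup_below_low(target_memcg, memcg)) {
 *		// soft protection: skip unless memory.low must be breached
 *	}
 */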
670  
671  void mem_cgroup_commit_charge(struct folio *folio, struct mem_cgroup *memcg);
672  
673  int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp);
674  
675  /**
676   * mem_cgroup_charge - Charge a newly allocated folio to a cgroup.
677   * @folio: Folio to charge.
678   * @mm: mm context of the allocating task.
679   * @gfp: Reclaim mode.
680   *
681   * Try to charge @folio to the memcg that @mm belongs to, reclaiming
682   * pages according to @gfp if necessary.  If @mm is NULL, try to
683   * charge to the active memcg.
684   *
685   * Do not use this for folios allocated for swapin.
686   *
687   * Return: 0 on success. Otherwise, an error code is returned.
688   */
static inline int mem_cgroup_charge(struct folio *folio, struct mm_struct *mm,
690  				    gfp_t gfp)
691  {
692  	if (mem_cgroup_disabled())
693  		return 0;
694  	return __mem_cgroup_charge(folio, mm, gfp);
695  }
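
/*
 * Usage sketch (illustrative only; hypothetical caller, error handling
 * trimmed): charge a freshly allocated folio before publishing it.
 *
 *	folio = folio_alloc(gfp, order);
 *	if (!folio)
 *		return -ENOMEM;
 *	if (mem_cgroup_charge(folio, mm, gfp)) {
 *		folio_put(folio);
 *		return -ENOMEM;
 *	}
 *	// ... map the folio or add it to the page cache; the charge is
 *	// dropped later via mem_cgroup_uncharge(), typically from the
 *	// final folio_put().
 */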
696  
697  int mem_cgroup_hugetlb_try_charge(struct mem_cgroup *memcg, gfp_t gfp,
698  		long nr_pages);
699  
700  int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm,
701  				  gfp_t gfp, swp_entry_t entry);
702  
703  void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry, unsigned int nr_pages);
704  
705  void __mem_cgroup_uncharge(struct folio *folio);
706  
707  /**
708   * mem_cgroup_uncharge - Uncharge a folio.
709   * @folio: Folio to uncharge.
710   *
711   * Uncharge a folio previously charged with mem_cgroup_charge().
712   */
static inline void mem_cgroup_uncharge(struct folio *folio)
714  {
715  	if (mem_cgroup_disabled())
716  		return;
717  	__mem_cgroup_uncharge(folio);
718  }
719  
720  void __mem_cgroup_uncharge_folios(struct folio_batch *folios);
static inline void mem_cgroup_uncharge_folios(struct folio_batch *folios)
722  {
723  	if (mem_cgroup_disabled())
724  		return;
725  	__mem_cgroup_uncharge_folios(folios);
726  }
727  
728  void mem_cgroup_cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages);
729  void mem_cgroup_replace_folio(struct folio *old, struct folio *new);
730  void mem_cgroup_migrate(struct folio *old, struct folio *new);
731  
732  /**
733   * mem_cgroup_lruvec - get the lru list vector for a memcg & node
734   * @memcg: memcg of the wanted lruvec
735   * @pgdat: pglist_data
736   *
737   * Returns the lru list vector holding pages for a given @memcg &
738   * @pgdat combination. This can be the node lruvec, if the memory
739   * controller is disabled.
740   */
static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
742  					       struct pglist_data *pgdat)
743  {
744  	struct mem_cgroup_per_node *mz;
745  	struct lruvec *lruvec;
746  
747  	if (mem_cgroup_disabled()) {
748  		lruvec = &pgdat->__lruvec;
749  		goto out;
750  	}
751  
752  	if (!memcg)
753  		memcg = root_mem_cgroup;
754  
755  	mz = memcg->nodeinfo[pgdat->node_id];
756  	lruvec = &mz->lruvec;
757  out:
758  	/*
759  	 * Since a node can be onlined after the mem_cgroup was created,
760  	 * we have to be prepared to initialize lruvec->pgdat here;
761  	 * and if offlined then reonlined, we need to reinitialize it.
762  	 */
763  	if (unlikely(lruvec->pgdat != pgdat))
764  		lruvec->pgdat = pgdat;
765  	return lruvec;
766  }
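
/*
 * Usage sketch (illustrative only; assumes a valid node id and uses
 * lruvec_page_state() declared later in this header): per-memcg, per-node
 * LRU statistics.
 *
 *	struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
 *	unsigned long file = lruvec_page_state(lruvec, NR_INACTIVE_FILE) +
 *			     lruvec_page_state(lruvec, NR_ACTIVE_FILE);
 */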
767  
768  /**
769   * folio_lruvec - return lruvec for isolating/putting an LRU folio
770   * @folio: Pointer to the folio.
771   *
772   * This function relies on folio->mem_cgroup being stable.
773   */
static inline struct lruvec *folio_lruvec(struct folio *folio)
775  {
776  	struct mem_cgroup *memcg = folio_memcg(folio);
777  
778  	VM_WARN_ON_ONCE_FOLIO(!memcg && !mem_cgroup_disabled(), folio);
779  	return mem_cgroup_lruvec(memcg, folio_pgdat(folio));
780  }
781  
782  struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
783  
784  struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm);
785  
786  struct mem_cgroup *get_mem_cgroup_from_current(void);
787  
788  struct mem_cgroup *get_mem_cgroup_from_folio(struct folio *folio);
789  
790  struct lruvec *folio_lruvec_lock(struct folio *folio);
791  struct lruvec *folio_lruvec_lock_irq(struct folio *folio);
792  struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
793  						unsigned long *flags);
794  
795  #ifdef CONFIG_DEBUG_VM
796  void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio);
797  #else
798  static inline
void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
800  {
801  }
802  #endif
803  
804  static inline
struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
{
806  	return css ? container_of(css, struct mem_cgroup, css) : NULL;
807  }
808  
static inline bool obj_cgroup_tryget(struct obj_cgroup *objcg)
810  {
811  	return percpu_ref_tryget(&objcg->refcnt);
812  }
813  
static inline void obj_cgroup_get(struct obj_cgroup *objcg)
815  {
816  	percpu_ref_get(&objcg->refcnt);
817  }
818  
static inline void obj_cgroup_get_many(struct obj_cgroup *objcg,
820  				       unsigned long nr)
821  {
822  	percpu_ref_get_many(&objcg->refcnt, nr);
823  }
824  
static inline void obj_cgroup_put(struct obj_cgroup *objcg)
826  {
827  	if (objcg)
828  		percpu_ref_put(&objcg->refcnt);
829  }
830  
static inline bool mem_cgroup_tryget(struct mem_cgroup *memcg)
832  {
833  	return !memcg || css_tryget(&memcg->css);
834  }
835  
static inline bool mem_cgroup_tryget_online(struct mem_cgroup *memcg)
837  {
838  	return !memcg || css_tryget_online(&memcg->css);
839  }
840  
static inline void mem_cgroup_put(struct mem_cgroup *memcg)
842  {
843  	if (memcg)
844  		css_put(&memcg->css);
845  }
846  
847  #define mem_cgroup_from_counter(counter, member)	\
848  	container_of(counter, struct mem_cgroup, member)
849  
850  struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
851  				   struct mem_cgroup *,
852  				   struct mem_cgroup_reclaim_cookie *);
853  void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
854  void mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
855  			   int (*)(struct task_struct *, void *), void *arg);
856  
static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
858  {
859  	if (mem_cgroup_disabled())
860  		return 0;
861  
862  	return memcg->id.id;
863  }
864  struct mem_cgroup *mem_cgroup_from_id(unsigned short id);
865  
866  #ifdef CONFIG_SHRINKER_DEBUG
static inline unsigned long mem_cgroup_ino(struct mem_cgroup *memcg)
868  {
869  	return memcg ? cgroup_ino(memcg->css.cgroup) : 0;
870  }
871  
872  struct mem_cgroup *mem_cgroup_get_from_ino(unsigned long ino);
873  #endif
874  
static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
876  {
877  	return mem_cgroup_from_css(seq_css(m));
878  }
879  
static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
881  {
882  	struct mem_cgroup_per_node *mz;
883  
884  	if (mem_cgroup_disabled())
885  		return NULL;
886  
887  	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
888  	return mz->memcg;
889  }
890  
891  /**
892   * parent_mem_cgroup - find the accounting parent of a memcg
893   * @memcg: memcg whose parent to find
894   *
895   * Returns the parent memcg, or NULL if this is the root.
896   */
static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
898  {
899  	return mem_cgroup_from_css(memcg->css.parent);
900  }
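
/*
 * Usage sketch (illustrative only; the caller must hold a reference on
 * @memcg or otherwise keep the css hierarchy stable): walking from a
 * memcg up to and including the root.
 *
 *	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
 *		... examine memcg and each of its ancestors ...
 *	}
 */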
901  
static inline bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
903  			      struct mem_cgroup *root)
904  {
905  	if (root == memcg)
906  		return true;
907  	return cgroup_is_descendant(memcg->css.cgroup, root->css.cgroup);
908  }
909  
static inline bool mm_match_cgroup(struct mm_struct *mm,
911  				   struct mem_cgroup *memcg)
912  {
913  	struct mem_cgroup *task_memcg;
914  	bool match = false;
915  
916  	rcu_read_lock();
917  	task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
918  	if (task_memcg)
919  		match = mem_cgroup_is_descendant(task_memcg, memcg);
920  	rcu_read_unlock();
921  	return match;
922  }
923  
924  struct cgroup_subsys_state *mem_cgroup_css_from_folio(struct folio *folio);
925  ino_t page_cgroup_ino(struct page *page);
926  
static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
928  {
929  	if (mem_cgroup_disabled())
930  		return true;
931  	return !!(memcg->css.flags & CSS_ONLINE);
932  }
933  
934  void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
935  		int zid, int nr_pages);
936  
937  static inline
unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
939  		enum lru_list lru, int zone_idx)
940  {
941  	struct mem_cgroup_per_node *mz;
942  
943  	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
944  	return READ_ONCE(mz->lru_zone_size[zone_idx][lru]);
945  }
946  
947  void mem_cgroup_handle_over_high(gfp_t gfp_mask);
948  
949  unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg);
950  
951  unsigned long mem_cgroup_size(struct mem_cgroup *memcg);
952  
953  void mem_cgroup_print_oom_context(struct mem_cgroup *memcg,
954  				struct task_struct *p);
955  
956  void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg);
957  
958  struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
959  					    struct mem_cgroup *oom_domain);
960  void mem_cgroup_print_oom_group(struct mem_cgroup *memcg);
961  
962  void __mod_memcg_state(struct mem_cgroup *memcg, enum memcg_stat_item idx,
963  		       int val);
964  
965  /* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void mod_memcg_state(struct mem_cgroup *memcg,
967  				   enum memcg_stat_item idx, int val)
968  {
969  	unsigned long flags;
970  
971  	local_irq_save(flags);
972  	__mod_memcg_state(memcg, idx, val);
973  	local_irq_restore(flags);
974  }
975  
static inline void mod_memcg_page_state(struct page *page,
977  					enum memcg_stat_item idx, int val)
978  {
979  	struct mem_cgroup *memcg;
980  
981  	if (mem_cgroup_disabled())
982  		return;
983  
984  	rcu_read_lock();
985  	memcg = folio_memcg(page_folio(page));
986  	if (memcg)
987  		mod_memcg_state(memcg, idx, val);
988  	rcu_read_unlock();
989  }
990  
991  unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx);
992  unsigned long lruvec_page_state(struct lruvec *lruvec, enum node_stat_item idx);
993  unsigned long lruvec_page_state_local(struct lruvec *lruvec,
994  				      enum node_stat_item idx);
995  
996  void mem_cgroup_flush_stats(struct mem_cgroup *memcg);
997  void mem_cgroup_flush_stats_ratelimited(struct mem_cgroup *memcg);
998  
999  void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val);
1000  
static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
1002  					 int val)
1003  {
1004  	unsigned long flags;
1005  
1006  	local_irq_save(flags);
1007  	__mod_lruvec_kmem_state(p, idx, val);
1008  	local_irq_restore(flags);
1009  }
1010  
1011  void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
1012  			  unsigned long count);
1013  
static inline void count_memcg_events(struct mem_cgroup *memcg,
1015  				      enum vm_event_item idx,
1016  				      unsigned long count)
1017  {
1018  	unsigned long flags;
1019  
1020  	local_irq_save(flags);
1021  	__count_memcg_events(memcg, idx, count);
1022  	local_irq_restore(flags);
1023  }
1024  
static inline void count_memcg_folio_events(struct folio *folio,
1026  		enum vm_event_item idx, unsigned long nr)
1027  {
1028  	struct mem_cgroup *memcg = folio_memcg(folio);
1029  
1030  	if (memcg)
1031  		count_memcg_events(memcg, idx, nr);
1032  }
1033  
static inline void count_memcg_events_mm(struct mm_struct *mm,
1035  					enum vm_event_item idx, unsigned long count)
1036  {
1037  	struct mem_cgroup *memcg;
1038  
1039  	if (mem_cgroup_disabled())
1040  		return;
1041  
1042  	rcu_read_lock();
1043  	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
1044  	if (likely(memcg))
1045  		count_memcg_events(memcg, idx, count);
1046  	rcu_read_unlock();
1047  }
1048  
static inline void count_memcg_event_mm(struct mm_struct *mm,
1050  					enum vm_event_item idx)
1051  {
1052  	count_memcg_events_mm(mm, idx, 1);
1053  }
1054  
static inline void memcg_memory_event(struct mem_cgroup *memcg,
1056  				      enum memcg_memory_event event)
1057  {
1058  	bool swap_event = event == MEMCG_SWAP_HIGH || event == MEMCG_SWAP_MAX ||
1059  			  event == MEMCG_SWAP_FAIL;
1060  
1061  	atomic_long_inc(&memcg->memory_events_local[event]);
1062  	if (!swap_event)
1063  		cgroup_file_notify(&memcg->events_local_file);
1064  
1065  	do {
1066  		atomic_long_inc(&memcg->memory_events[event]);
1067  		if (swap_event)
1068  			cgroup_file_notify(&memcg->swap_events_file);
1069  		else
1070  			cgroup_file_notify(&memcg->events_file);
1071  
1072  		if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
1073  			break;
1074  		if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_LOCAL_EVENTS)
1075  			break;
1076  	} while ((memcg = parent_mem_cgroup(memcg)) &&
1077  		 !mem_cgroup_is_root(memcg));
1078  }
1079  
static inline void memcg_memory_event_mm(struct mm_struct *mm,
1081  					 enum memcg_memory_event event)
1082  {
1083  	struct mem_cgroup *memcg;
1084  
1085  	if (mem_cgroup_disabled())
1086  		return;
1087  
1088  	rcu_read_lock();
1089  	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
1090  	if (likely(memcg))
1091  		memcg_memory_event(memcg, event);
1092  	rcu_read_unlock();
1093  }
1094  
1095  void split_page_memcg(struct page *head, int old_order, int new_order);
1096  
1097  #else /* CONFIG_MEMCG */
1098  
1099  #define MEM_CGROUP_ID_SHIFT	0
1100  
static inline struct mem_cgroup *folio_memcg(struct folio *folio)
1102  {
1103  	return NULL;
1104  }
1105  
static inline struct mem_cgroup *folio_memcg_rcu(struct folio *folio)
1107  {
1108  	WARN_ON_ONCE(!rcu_read_lock_held());
1109  	return NULL;
1110  }
1111  
static inline struct mem_cgroup *folio_memcg_check(struct folio *folio)
1113  {
1114  	return NULL;
1115  }
1116  
static inline struct mem_cgroup *page_memcg_check(struct page *page)
1118  {
1119  	return NULL;
1120  }
1121  
static inline struct mem_cgroup *get_mem_cgroup_from_objcg(struct obj_cgroup *objcg)
1123  {
1124  	return NULL;
1125  }
1126  
static inline bool folio_memcg_kmem(struct folio *folio)
1128  {
1129  	return false;
1130  }
1131  
static inline bool PageMemcgKmem(struct page *page)
1133  {
1134  	return false;
1135  }
1136  
static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
1138  {
1139  	return true;
1140  }
1141  
static inline bool mem_cgroup_disabled(void)
1143  {
1144  	return true;
1145  }
1146  
static inline void memcg_memory_event(struct mem_cgroup *memcg,
1148  				      enum memcg_memory_event event)
1149  {
1150  }
1151  
static inline void memcg_memory_event_mm(struct mm_struct *mm,
1153  					 enum memcg_memory_event event)
1154  {
1155  }
1156  
static inline void mem_cgroup_protection(struct mem_cgroup *root,
1158  					 struct mem_cgroup *memcg,
1159  					 unsigned long *min,
1160  					 unsigned long *low)
1161  {
1162  	*min = *low = 0;
1163  }
1164  
static inline void mem_cgroup_calculate_protection(struct mem_cgroup *root,
1166  						   struct mem_cgroup *memcg)
1167  {
1168  }
1169  
static inline bool mem_cgroup_unprotected(struct mem_cgroup *target,
1171  					  struct mem_cgroup *memcg)
1172  {
1173  	return true;
1174  }
static inline bool mem_cgroup_below_low(struct mem_cgroup *target,
1176  					struct mem_cgroup *memcg)
1177  {
1178  	return false;
1179  }
1180  
static inline bool mem_cgroup_below_min(struct mem_cgroup *target,
1182  					struct mem_cgroup *memcg)
1183  {
1184  	return false;
1185  }
1186  
static inline void mem_cgroup_commit_charge(struct folio *folio,
1188  		struct mem_cgroup *memcg)
1189  {
1190  }
1191  
static inline int mem_cgroup_charge(struct folio *folio,
1193  		struct mm_struct *mm, gfp_t gfp)
1194  {
1195  	return 0;
1196  }
1197  
static inline int mem_cgroup_hugetlb_try_charge(struct mem_cgroup *memcg,
1199  		gfp_t gfp, long nr_pages)
1200  {
1201  	return 0;
1202  }
1203  
static inline int mem_cgroup_swapin_charge_folio(struct folio *folio,
1205  			struct mm_struct *mm, gfp_t gfp, swp_entry_t entry)
1206  {
1207  	return 0;
1208  }
1209  
static inline void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry, unsigned int nr)
1211  {
1212  }
1213  
static inline void mem_cgroup_uncharge(struct folio *folio)
1215  {
1216  }
1217  
static inline void mem_cgroup_uncharge_folios(struct folio_batch *folios)
1219  {
1220  }
1221  
static inline void mem_cgroup_cancel_charge(struct mem_cgroup *memcg,
1223  		unsigned int nr_pages)
1224  {
1225  }
1226  
static inline void mem_cgroup_replace_folio(struct folio *old,
1228  		struct folio *new)
1229  {
1230  }
1231  
static inline void mem_cgroup_migrate(struct folio *old, struct folio *new)
1233  {
1234  }
1235  
static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
1237  					       struct pglist_data *pgdat)
1238  {
1239  	return &pgdat->__lruvec;
1240  }
1241  
static inline struct lruvec *folio_lruvec(struct folio *folio)
1243  {
1244  	struct pglist_data *pgdat = folio_pgdat(folio);
1245  	return &pgdat->__lruvec;
1246  }
1247  
1248  static inline
void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
1250  {
1251  }
1252  
static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
1254  {
1255  	return NULL;
1256  }
1257  
static inline bool mm_match_cgroup(struct mm_struct *mm,
1259  		struct mem_cgroup *memcg)
1260  {
1261  	return true;
1262  }
1263  
static inline struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
1265  {
1266  	return NULL;
1267  }
1268  
static inline struct mem_cgroup *get_mem_cgroup_from_current(void)
1270  {
1271  	return NULL;
1272  }
1273  
static inline struct mem_cgroup *get_mem_cgroup_from_folio(struct folio *folio)
1275  {
1276  	return NULL;
1277  }
1278  
1279  static inline
struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
1281  {
1282  	return NULL;
1283  }
1284  
static inline void obj_cgroup_put(struct obj_cgroup *objcg)
1286  {
1287  }
1288  
static inline bool mem_cgroup_tryget(struct mem_cgroup *memcg)
1290  {
1291  	return true;
1292  }
1293  
static inline bool mem_cgroup_tryget_online(struct mem_cgroup *memcg)
1295  {
1296  	return true;
1297  }
1298  
static inline void mem_cgroup_put(struct mem_cgroup *memcg)
1300  {
1301  }
1302  
static inline struct lruvec *folio_lruvec_lock(struct folio *folio)
1304  {
1305  	struct pglist_data *pgdat = folio_pgdat(folio);
1306  
1307  	spin_lock(&pgdat->__lruvec.lru_lock);
1308  	return &pgdat->__lruvec;
1309  }
1310  
static inline struct lruvec *folio_lruvec_lock_irq(struct folio *folio)
1312  {
1313  	struct pglist_data *pgdat = folio_pgdat(folio);
1314  
1315  	spin_lock_irq(&pgdat->__lruvec.lru_lock);
1316  	return &pgdat->__lruvec;
1317  }
1318  
static inline struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
1320  		unsigned long *flagsp)
1321  {
1322  	struct pglist_data *pgdat = folio_pgdat(folio);
1323  
1324  	spin_lock_irqsave(&pgdat->__lruvec.lru_lock, *flagsp);
1325  	return &pgdat->__lruvec;
1326  }
1327  
1328  static inline struct mem_cgroup *
mem_cgroup_iter(struct mem_cgroup *root,
1330  		struct mem_cgroup *prev,
1331  		struct mem_cgroup_reclaim_cookie *reclaim)
1332  {
1333  	return NULL;
1334  }
1335  
static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
1337  					 struct mem_cgroup *prev)
1338  {
1339  }
1340  
static inline void mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
1342  		int (*fn)(struct task_struct *, void *), void *arg)
1343  {
1344  }
1345  
static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
1347  {
1348  	return 0;
1349  }
1350  
static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
1352  {
1353  	WARN_ON_ONCE(id);
1354  	/* XXX: This should always return root_mem_cgroup */
1355  	return NULL;
1356  }
1357  
1358  #ifdef CONFIG_SHRINKER_DEBUG
static inline unsigned long mem_cgroup_ino(struct mem_cgroup *memcg)
1360  {
1361  	return 0;
1362  }
1363  
static inline struct mem_cgroup *mem_cgroup_get_from_ino(unsigned long ino)
1365  {
1366  	return NULL;
1367  }
1368  #endif
1369  
static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
1371  {
1372  	return NULL;
1373  }
1374  
static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
1376  {
1377  	return NULL;
1378  }
1379  
static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
1381  {
1382  	return true;
1383  }
1384  
1385  static inline
unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
1387  		enum lru_list lru, int zone_idx)
1388  {
1389  	return 0;
1390  }
1391  
static inline unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
1393  {
1394  	return 0;
1395  }
1396  
static inline unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
1398  {
1399  	return 0;
1400  }
1401  
1402  static inline void
mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
1404  {
1405  }
1406  
1407  static inline void
mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
1409  {
1410  }
1411  
static inline void mem_cgroup_handle_over_high(gfp_t gfp_mask)
1413  {
1414  }
1415  
static inline struct mem_cgroup *mem_cgroup_get_oom_group(
1417  	struct task_struct *victim, struct mem_cgroup *oom_domain)
1418  {
1419  	return NULL;
1420  }
1421  
static inline void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
1423  {
1424  }
1425  
static inline void __mod_memcg_state(struct mem_cgroup *memcg,
1427  				     enum memcg_stat_item idx,
1428  				     int nr)
1429  {
1430  }
1431  
static inline void mod_memcg_state(struct mem_cgroup *memcg,
1433  				   enum memcg_stat_item idx,
1434  				   int nr)
1435  {
1436  }
1437  
static inline void mod_memcg_page_state(struct page *page,
1439  					enum memcg_stat_item idx, int val)
1440  {
1441  }
1442  
static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
1444  {
1445  	return 0;
1446  }
1447  
static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
1449  					      enum node_stat_item idx)
1450  {
1451  	return node_page_state(lruvec_pgdat(lruvec), idx);
1452  }
1453  
static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
1455  						    enum node_stat_item idx)
1456  {
1457  	return node_page_state(lruvec_pgdat(lruvec), idx);
1458  }
1459  
static inline void mem_cgroup_flush_stats(struct mem_cgroup *memcg)
1461  {
1462  }
1463  
static inline void mem_cgroup_flush_stats_ratelimited(struct mem_cgroup *memcg)
1465  {
1466  }
1467  
static inline void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
1469  					   int val)
1470  {
1471  	struct page *page = virt_to_head_page(p);
1472  
1473  	__mod_node_page_state(page_pgdat(page), idx, val);
1474  }
1475  
static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
1477  					 int val)
1478  {
1479  	struct page *page = virt_to_head_page(p);
1480  
1481  	mod_node_page_state(page_pgdat(page), idx, val);
1482  }
1483  
static inline void count_memcg_events(struct mem_cgroup *memcg,
1485  				      enum vm_event_item idx,
1486  				      unsigned long count)
1487  {
1488  }
1489  
static inline void __count_memcg_events(struct mem_cgroup *memcg,
1491  					enum vm_event_item idx,
1492  					unsigned long count)
1493  {
1494  }
1495  
static inline void count_memcg_folio_events(struct folio *folio,
1497  		enum vm_event_item idx, unsigned long nr)
1498  {
1499  }
1500  
static inline void count_memcg_events_mm(struct mm_struct *mm,
1502  					enum vm_event_item idx, unsigned long count)
1503  {
1504  }
1505  
1506  static inline
void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx)
1508  {
1509  }
1510  
static inline void split_page_memcg(struct page *head, int old_order, int new_order)
1512  {
1513  }
1514  #endif /* CONFIG_MEMCG */
1515  
1516  /*
1517   * Extended information for slab objects stored as an array in page->memcg_data
1518   * if MEMCG_DATA_OBJEXTS is set.
1519   */
1520  struct slabobj_ext {
1521  #ifdef CONFIG_MEMCG
1522  	struct obj_cgroup *objcg;
1523  #endif
1524  #ifdef CONFIG_MEM_ALLOC_PROFILING
1525  	union codetag_ref ref;
1526  #endif
1527  } __aligned(8);
1528  
static inline void __inc_lruvec_kmem_state(void *p, enum node_stat_item idx)
1530  {
1531  	__mod_lruvec_kmem_state(p, idx, 1);
1532  }
1533  
static inline void __dec_lruvec_kmem_state(void *p, enum node_stat_item idx)
1535  {
1536  	__mod_lruvec_kmem_state(p, idx, -1);
1537  }
1538  
static inline struct lruvec *parent_lruvec(struct lruvec *lruvec)
1540  {
1541  	struct mem_cgroup *memcg;
1542  
1543  	memcg = lruvec_memcg(lruvec);
1544  	if (!memcg)
1545  		return NULL;
1546  	memcg = parent_mem_cgroup(memcg);
1547  	if (!memcg)
1548  		return NULL;
1549  	return mem_cgroup_lruvec(memcg, lruvec_pgdat(lruvec));
1550  }
1551  
static inline void unlock_page_lruvec(struct lruvec *lruvec)
1553  {
1554  	spin_unlock(&lruvec->lru_lock);
1555  }
1556  
static inline void unlock_page_lruvec_irq(struct lruvec *lruvec)
1558  {
1559  	spin_unlock_irq(&lruvec->lru_lock);
1560  }
1561  
static inline void unlock_page_lruvec_irqrestore(struct lruvec *lruvec,
1563  		unsigned long flags)
1564  {
1565  	spin_unlock_irqrestore(&lruvec->lru_lock, flags);
1566  }
1567  
1568  /* Test requires a stable folio->memcg binding, see folio_memcg() */
static inline bool folio_matches_lruvec(struct folio *folio,
1570  		struct lruvec *lruvec)
1571  {
1572  	return lruvec_pgdat(lruvec) == folio_pgdat(folio) &&
1573  	       lruvec_memcg(lruvec) == folio_memcg(folio);
1574  }
1575  
/* Don't lock again iff the folio's lruvec is already locked */
static inline struct lruvec *folio_lruvec_relock_irq(struct folio *folio,
1578  		struct lruvec *locked_lruvec)
1579  {
1580  	if (locked_lruvec) {
1581  		if (folio_matches_lruvec(folio, locked_lruvec))
1582  			return locked_lruvec;
1583  
1584  		unlock_page_lruvec_irq(locked_lruvec);
1585  	}
1586  
1587  	return folio_lruvec_lock_irq(folio);
1588  }
1589  
1590  /* Don't lock again iff folio's lruvec locked */
static inline void folio_lruvec_relock_irqsave(struct folio *folio,
1592  		struct lruvec **lruvecp, unsigned long *flags)
1593  {
1594  	if (*lruvecp) {
1595  		if (folio_matches_lruvec(folio, *lruvecp))
1596  			return;
1597  
1598  		unlock_page_lruvec_irqrestore(*lruvecp, *flags);
1599  	}
1600  
1601  	*lruvecp = folio_lruvec_lock_irqsave(folio, flags);
1602  }
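
/*
 * Sketch of the intended batching pattern (loosely modelled on the
 * folio_batch users in mm/; "fbatch" is a struct folio_batch supplied by
 * the caller): the lruvec lock is only re-taken when consecutive folios
 * belong to different lruvecs.
 *
 *	struct lruvec *lruvec = NULL;
 *	unsigned long flags;
 *	unsigned int i;
 *
 *	for (i = 0; i < folio_batch_count(fbatch); i++) {
 *		struct folio *folio = fbatch->folios[i];
 *
 *		folio_lruvec_relock_irqsave(folio, &lruvec, &flags);
 *		... operate on the folio's LRU state ...
 *	}
 *	if (lruvec)
 *		unlock_page_lruvec_irqrestore(lruvec, flags);
 */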
1603  
1604  #ifdef CONFIG_CGROUP_WRITEBACK
1605  
1606  struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb);
1607  void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
1608  			 unsigned long *pheadroom, unsigned long *pdirty,
1609  			 unsigned long *pwriteback);
1610  
1611  void mem_cgroup_track_foreign_dirty_slowpath(struct folio *folio,
1612  					     struct bdi_writeback *wb);
1613  
1614  static inline void mem_cgroup_track_foreign_dirty(struct folio *folio,
1615  						  struct bdi_writeback *wb)
1616  {
1617  	struct mem_cgroup *memcg;
1618  
1619  	if (mem_cgroup_disabled())
1620  		return;
1621  
1622  	memcg = folio_memcg(folio);
1623  	if (unlikely(memcg && &memcg->css != wb->memcg_css))
1624  		mem_cgroup_track_foreign_dirty_slowpath(folio, wb);
1625  }
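/*
 * Usage sketch (simplified): the dirtying path calls this once per newly
 * dirtied folio, roughly as below.  The slowpath only runs in the rare
 * case where the folio's memcg does not own the writeback domain it is
 * being dirtied against.
 *
 *	if (mapping_can_writeback(mapping))
 *		mem_cgroup_track_foreign_dirty(folio, wb);
 */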
1626  
1627  void mem_cgroup_flush_foreign(struct bdi_writeback *wb);
1628  
1629  #else	/* CONFIG_CGROUP_WRITEBACK */
1630  
1631  static inline struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
1632  {
1633  	return NULL;
1634  }
1635  
1636  static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb,
1637  				       unsigned long *pfilepages,
1638  				       unsigned long *pheadroom,
1639  				       unsigned long *pdirty,
1640  				       unsigned long *pwriteback)
1641  {
1642  }
1643  
1644  static inline void mem_cgroup_track_foreign_dirty(struct folio *folio,
1645  						  struct bdi_writeback *wb)
1646  {
1647  }
1648  
1649  static inline void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
1650  {
1651  }
1652  
1653  #endif	/* CONFIG_CGROUP_WRITEBACK */
1654  
1655  struct sock;
1656  bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages,
1657  			     gfp_t gfp_mask);
1658  void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
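/*
 * Usage sketch (simplified): a protocol charging freshly allocated socket
 * buffer memory to the socket's memcg, and releasing the charge when the
 * memory is freed.  nr_pages and gfp are assumed caller-provided.
 *
 *	if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
 *	    !mem_cgroup_charge_skmem(sk->sk_memcg, nr_pages, gfp))
 *		return -ENOMEM;
 *	...
 *	if (mem_cgroup_sockets_enabled && sk->sk_memcg)
 *		mem_cgroup_uncharge_skmem(sk->sk_memcg, nr_pages);
 */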
1659  #ifdef CONFIG_MEMCG
1660  extern struct static_key_false memcg_sockets_enabled_key;
1661  #define mem_cgroup_sockets_enabled static_branch_unlikely(&memcg_sockets_enabled_key)
1662  void mem_cgroup_sk_alloc(struct sock *sk);
1663  void mem_cgroup_sk_free(struct sock *sk);
1664  static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
1665  {
1666  #ifdef CONFIG_MEMCG_V1
1667  	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
1668  		return !!memcg->tcpmem_pressure;
1669  #endif /* CONFIG_MEMCG_V1 */
1670  	do {
1671  		if (time_before(jiffies, READ_ONCE(memcg->socket_pressure)))
1672  			return true;
1673  	} while ((memcg = parent_mem_cgroup(memcg)));
1674  	return false;
1675  }
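/*
 * Usage sketch: networking code typically folds this into its memory
 * pressure checks, e.g. (simplified, modelled on the TCP helpers):
 *
 *	if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
 *	    mem_cgroup_under_socket_pressure(sk->sk_memcg))
 *		return true;	// behave as if under memory pressure
 */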
1676  
1677  int alloc_shrinker_info(struct mem_cgroup *memcg);
1678  void free_shrinker_info(struct mem_cgroup *memcg);
1679  void set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id);
1680  void reparent_shrinker_deferred(struct mem_cgroup *memcg);
1681  #else
1682  #define mem_cgroup_sockets_enabled 0
1683  static inline void mem_cgroup_sk_alloc(struct sock *sk) { };
1684  static inline void mem_cgroup_sk_free(struct sock *sk) { };
1685  static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
1686  {
1687  	return false;
1688  }
1689  
1690  static inline void set_shrinker_bit(struct mem_cgroup *memcg,
1691  				    int nid, int shrinker_id)
1692  {
1693  }
1694  #endif
1695  
1696  #ifdef CONFIG_MEMCG
1697  bool mem_cgroup_kmem_disabled(void);
1698  int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order);
1699  void __memcg_kmem_uncharge_page(struct page *page, int order);
1700  
1701  /*
1702   * The returned objcg pointer is safe to use without additional
1703   * protection within a scope. The scope is defined either by
1704   * the current task (similar to the "current" global variable)
1705   * or by a set_active_memcg() pair.
1706   * Please use obj_cgroup_get() to get a reference if the pointer
1707   * needs to be used outside of the local scope.
1708   */
1709  struct obj_cgroup *current_obj_cgroup(void);
1710  struct obj_cgroup *get_obj_cgroup_from_folio(struct folio *folio);
1711  
1712  static inline struct obj_cgroup *get_obj_cgroup_from_current(void)
1713  {
1714  	struct obj_cgroup *objcg = current_obj_cgroup();
1715  
1716  	if (objcg)
1717  		obj_cgroup_get(objcg);
1718  
1719  	return objcg;
1720  }
1721  
1722  int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size);
1723  void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size);
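/*
 * Usage sketch (illustrative): charging an object-sized allocation to the
 * current task's objcg and keeping a reference so the charge can be
 * released later.  'size' is assumed caller-provided.
 *
 *	struct obj_cgroup *objcg = get_obj_cgroup_from_current();
 *
 *	if (objcg && obj_cgroup_charge(objcg, GFP_KERNEL, size)) {
 *		obj_cgroup_put(objcg);		// charge failed
 *		return -ENOMEM;
 *	}
 *	...
 *	if (objcg) {
 *		obj_cgroup_uncharge(objcg, size);
 *		obj_cgroup_put(objcg);
 *	}
 */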
1724  
1725  extern struct static_key_false memcg_bpf_enabled_key;
1726  static inline bool memcg_bpf_enabled(void)
1727  {
1728  	return static_branch_likely(&memcg_bpf_enabled_key);
1729  }
1730  
1731  extern struct static_key_false memcg_kmem_online_key;
1732  
1733  static inline bool memcg_kmem_online(void)
1734  {
1735  	return static_branch_likely(&memcg_kmem_online_key);
1736  }
1737  
1738  static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
1739  					 int order)
1740  {
1741  	if (memcg_kmem_online())
1742  		return __memcg_kmem_charge_page(page, gfp, order);
1743  	return 0;
1744  }
1745  
1746  static inline void memcg_kmem_uncharge_page(struct page *page, int order)
1747  {
1748  	if (memcg_kmem_online())
1749  		__memcg_kmem_uncharge_page(page, order);
1750  }
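/*
 * Usage sketch: the page allocator charges __GFP_ACCOUNT allocations
 * roughly like this (simplified) and undoes it on the free path with
 * memcg_kmem_uncharge_page():
 *
 *	if ((gfp & __GFP_ACCOUNT) &&
 *	    memcg_kmem_charge_page(page, gfp, order)) {
 *		__free_pages(page, order);
 *		page = NULL;
 *	}
 */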
1751  
1752  /*
1753   * A helper for accessing a memcg's kmem_id, used to look up the
1754   * corresponding LRU lists.
1755   */
1756  static inline int memcg_kmem_id(struct mem_cgroup *memcg)
1757  {
1758  	return memcg ? memcg->kmemcg_id : -1;
1759  }
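/*
 * Example (illustrative): a memcg-aware cache could use the id to index
 * its per-memcg structures; -1 means "no kmem accounting for this memcg".
 * The memcg_lrus layout below is hypothetical.
 *
 *	int idx = memcg_kmem_id(memcg);
 *
 *	if (idx >= 0)
 *		lru = &cache->memcg_lrus[idx];
 */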
1760  
1761  struct mem_cgroup *mem_cgroup_from_slab_obj(void *p);
1762  
1763  static inline void count_objcg_events(struct obj_cgroup *objcg,
1764  				      enum vm_event_item idx,
1765  				      unsigned long count)
1766  {
1767  	struct mem_cgroup *memcg;
1768  
1769  	if (!memcg_kmem_online())
1770  		return;
1771  
1772  	rcu_read_lock();
1773  	memcg = obj_cgroup_memcg(objcg);
1774  	count_memcg_events(memcg, idx, count);
1775  	rcu_read_unlock();
1776  }
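/*
 * Usage sketch: charging a per-memcg VM event to whichever cgroup owns an
 * object, e.g. zswap-style accounting (ZSWPOUT is only available in
 * configurations that define it):
 *
 *	count_objcg_events(objcg, ZSWPOUT, 1);
 */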
1777  
1778  #else
1779  static inline bool mem_cgroup_kmem_disabled(void)
1780  {
1781  	return true;
1782  }
1783  
1784  static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
1785  					 int order)
1786  {
1787  	return 0;
1788  }
1789  
1790  static inline void memcg_kmem_uncharge_page(struct page *page, int order)
1791  {
1792  }
1793  
1794  static inline int __memcg_kmem_charge_page(struct page *page, gfp_t gfp,
1795  					   int order)
1796  {
1797  	return 0;
1798  }
1799  
1800  static inline void __memcg_kmem_uncharge_page(struct page *page, int order)
1801  {
1802  }
1803  
1804  static inline struct obj_cgroup *get_obj_cgroup_from_folio(struct folio *folio)
1805  {
1806  	return NULL;
1807  }
1808  
1809  static inline bool memcg_bpf_enabled(void)
1810  {
1811  	return false;
1812  }
1813  
1814  static inline bool memcg_kmem_online(void)
1815  {
1816  	return false;
1817  }
1818  
1819  static inline int memcg_kmem_id(struct mem_cgroup *memcg)
1820  {
1821  	return -1;
1822  }
1823  
1824  static inline struct mem_cgroup *mem_cgroup_from_slab_obj(void *p)
1825  {
1826  	return NULL;
1827  }
1828  
1829  static inline void count_objcg_events(struct obj_cgroup *objcg,
1830  				      enum vm_event_item idx,
1831  				      unsigned long count)
1832  {
1833  }
1834  
1835  #endif /* CONFIG_MEMCG */
1836  
1837  #if defined(CONFIG_MEMCG) && defined(CONFIG_ZSWAP)
1838  bool obj_cgroup_may_zswap(struct obj_cgroup *objcg);
1839  void obj_cgroup_charge_zswap(struct obj_cgroup *objcg, size_t size);
1840  void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg, size_t size);
1841  bool mem_cgroup_zswap_writeback_enabled(struct mem_cgroup *memcg);
1842  #else
1843  static inline bool obj_cgroup_may_zswap(struct obj_cgroup *objcg)
1844  {
1845  	return true;
1846  }
1847  static inline void obj_cgroup_charge_zswap(struct obj_cgroup *objcg,
1848  					   size_t size)
1849  {
1850  }
1851  static inline void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg,
1852  					     size_t size)
1853  {
1854  }
1855  static inline bool mem_cgroup_zswap_writeback_enabled(struct mem_cgroup *memcg)
1856  {
1857  	/* if zswap is disabled, do not block pages going to the swapping device */
1858  	return true;
1859  }
1860  #endif
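/*
 * Usage sketch (simplified): a zswap store path first checks the cgroup's
 * zswap limit, then charges the compressed size on success; the charge is
 * dropped with obj_cgroup_uncharge_zswap() when the entry is freed.
 *
 *	if (objcg && !obj_cgroup_may_zswap(objcg))
 *		goto reject;
 *	...
 *	if (objcg)
 *		obj_cgroup_charge_zswap(objcg, compressed_len);
 */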
1861  
1862  
1863  /* Cgroup v1-related declarations */
1864  
1865  #ifdef CONFIG_MEMCG_V1
1866  unsigned long memcg1_soft_limit_reclaim(pg_data_t *pgdat, int order,
1867  					gfp_t gfp_mask,
1868  					unsigned long *total_scanned);
1869  
1870  bool mem_cgroup_oom_synchronize(bool wait);
1871  
1872  static inline bool task_in_memcg_oom(struct task_struct *p)
1873  {
1874  	return p->memcg_in_oom;
1875  }
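/*
 * Usage sketch: the page fault exit path lets a task that hit a memcg OOM
 * synchronize with the cgroup's OOM handling instead of falling through to
 * the global OOM killer (simplified):
 *
 *	if (fault & VM_FAULT_OOM) {
 *		if (task_in_memcg_oom(current))
 *			mem_cgroup_oom_synchronize(true);
 *	}
 */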
1876  
1877  void folio_memcg_lock(struct folio *folio);
1878  void folio_memcg_unlock(struct folio *folio);
1879  
1880  /* try to stabilize folio_memcg() for all the pages in a memcg */
1881  static inline bool mem_cgroup_trylock_pages(struct mem_cgroup *memcg)
1882  {
1883  	rcu_read_lock();
1884  
1885  	if (mem_cgroup_disabled() || !atomic_read(&memcg->moving_account))
1886  		return true;
1887  
1888  	rcu_read_unlock();
1889  	return false;
1890  }
1891  
1892  static inline void mem_cgroup_unlock_pages(void)
1893  {
1894  	rcu_read_unlock();
1895  }
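/*
 * Usage sketch: stabilizing folio_memcg() for a batch of lookups; on v1
 * the trylock fails while charges are being moved between cgroups, in
 * which case the caller is expected to retry later.
 *
 *	if (!mem_cgroup_trylock_pages(memcg))
 *		return;
 *	// folio_memcg() results are stable here
 *	mem_cgroup_unlock_pages();
 */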
1896  
1897  static inline void mem_cgroup_enter_user_fault(void)
1898  {
1899  	WARN_ON(current->in_user_fault);
1900  	current->in_user_fault = 1;
1901  }
1902  
1903  static inline void mem_cgroup_exit_user_fault(void)
1904  {
1905  	WARN_ON(!current->in_user_fault);
1906  	current->in_user_fault = 0;
1907  }
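/*
 * Usage sketch: the fault handler brackets userspace faults so that memcg
 * OOM handling knows it is allowed to act (simplified, modelled on the
 * handle_mm_fault() path):
 *
 *	if (flags & FAULT_FLAG_USER)
 *		mem_cgroup_enter_user_fault();
 *	ret = __handle_mm_fault(vma, address, flags);
 *	if (flags & FAULT_FLAG_USER)
 *		mem_cgroup_exit_user_fault();
 */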
1908  
1909  #else /* CONFIG_MEMCG_V1 */
1910  static inline
1911  unsigned long memcg1_soft_limit_reclaim(pg_data_t *pgdat, int order,
1912  					gfp_t gfp_mask,
1913  					unsigned long *total_scanned)
1914  {
1915  	return 0;
1916  }
1917  
1918  static inline void folio_memcg_lock(struct folio *folio)
1919  {
1920  }
1921  
1922  static inline void folio_memcg_unlock(struct folio *folio)
1923  {
1924  }
1925  
1926  static inline bool mem_cgroup_trylock_pages(struct mem_cgroup *memcg)
1927  {
1928  	/* to match folio_memcg_rcu() */
1929  	rcu_read_lock();
1930  	return true;
1931  }
1932  
1933  static inline void mem_cgroup_unlock_pages(void)
1934  {
1935  	rcu_read_unlock();
1936  }
1937  
1938  static inline bool task_in_memcg_oom(struct task_struct *p)
1939  {
1940  	return false;
1941  }
1942  
1943  static inline bool mem_cgroup_oom_synchronize(bool wait)
1944  {
1945  	return false;
1946  }
1947  
1948  static inline void mem_cgroup_enter_user_fault(void)
1949  {
1950  }
1951  
1952  static inline void mem_cgroup_exit_user_fault(void)
1953  {
1954  }
1955  
1956  #endif /* CONFIG_MEMCG_V1 */
1957  
1958  #endif /* _LINUX_MEMCONTROL_H */
1959