1  // SPDX-License-Identifier: GPL-2.0-or-later
2  /* memcontrol.c - Memory Controller
3   *
4   * Copyright IBM Corporation, 2007
5   * Author Balbir Singh <balbir@linux.vnet.ibm.com>
6   *
7   * Copyright 2007 OpenVZ SWsoft Inc
8   * Author: Pavel Emelianov <xemul@openvz.org>
9   *
10   * Memory thresholds
11   * Copyright (C) 2009 Nokia Corporation
12   * Author: Kirill A. Shutemov
13   *
14   * Kernel Memory Controller
15   * Copyright (C) 2012 Parallels Inc. and Google Inc.
16   * Authors: Glauber Costa and Suleiman Souhlal
17   *
18   * Native page reclaim
19   * Charge lifetime sanitation
20   * Lockless page tracking & accounting
21   * Unified hierarchy configuration model
22   * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner
23   *
24   * Per memcg lru locking
25   * Copyright (C) 2020 Alibaba, Inc, Alex Shi
26   */
27  
28  #include <linux/cgroup-defs.h>
29  #include <linux/page_counter.h>
30  #include <linux/memcontrol.h>
31  #include <linux/cgroup.h>
32  #include <linux/sched/mm.h>
33  #include <linux/shmem_fs.h>
34  #include <linux/hugetlb.h>
35  #include <linux/pagemap.h>
36  #include <linux/pagevec.h>
37  #include <linux/vm_event_item.h>
38  #include <linux/smp.h>
39  #include <linux/page-flags.h>
40  #include <linux/backing-dev.h>
41  #include <linux/bit_spinlock.h>
42  #include <linux/rcupdate.h>
43  #include <linux/limits.h>
44  #include <linux/export.h>
45  #include <linux/list.h>
46  #include <linux/mutex.h>
47  #include <linux/rbtree.h>
48  #include <linux/slab.h>
49  #include <linux/swapops.h>
50  #include <linux/spinlock.h>
51  #include <linux/fs.h>
52  #include <linux/seq_file.h>
53  #include <linux/parser.h>
54  #include <linux/vmpressure.h>
55  #include <linux/memremap.h>
56  #include <linux/mm_inline.h>
57  #include <linux/swap_cgroup.h>
58  #include <linux/cpu.h>
59  #include <linux/oom.h>
60  #include <linux/lockdep.h>
61  #include <linux/resume_user_mode.h>
62  #include <linux/psi.h>
63  #include <linux/seq_buf.h>
64  #include <linux/sched/isolation.h>
65  #include <linux/kmemleak.h>
66  #include "internal.h"
67  #include <net/sock.h>
68  #include <net/ip.h>
69  #include "slab.h"
70  #include "memcontrol-v1.h"
71  
72  #include <linux/uaccess.h>
73  
74  #include <trace/events/vmscan.h>
75  
76  struct cgroup_subsys memory_cgrp_subsys __read_mostly;
77  EXPORT_SYMBOL(memory_cgrp_subsys);
78  
79  struct mem_cgroup *root_mem_cgroup __read_mostly;
80  
81  /* Active memory cgroup to use from an interrupt context */
82  DEFINE_PER_CPU(struct mem_cgroup *, int_active_memcg);
83  EXPORT_PER_CPU_SYMBOL_GPL(int_active_memcg);
84  
85  /* Socket memory accounting disabled? */
86  static bool cgroup_memory_nosocket __ro_after_init;
87  
88  /* Kernel memory accounting disabled? */
89  static bool cgroup_memory_nokmem __ro_after_init;
90  
91  /* BPF memory accounting disabled? */
92  static bool cgroup_memory_nobpf __ro_after_init;
93  
94  #ifdef CONFIG_CGROUP_WRITEBACK
95  static DECLARE_WAIT_QUEUE_HEAD(memcg_cgwb_frn_waitq);
96  #endif
97  
98  static inline bool task_is_dying(void)
99  {
100  	return tsk_is_oom_victim(current) || fatal_signal_pending(current) ||
101  		(current->flags & PF_EXITING);
102  }
103  
104  /* Some nice accessors for the vmpressure. */
105  struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
106  {
107  	if (!memcg)
108  		memcg = root_mem_cgroup;
109  	return &memcg->vmpressure;
110  }
111  
112  struct mem_cgroup *vmpressure_to_memcg(struct vmpressure *vmpr)
113  {
114  	return container_of(vmpr, struct mem_cgroup, vmpressure);
115  }
116  
117  #define CURRENT_OBJCG_UPDATE_BIT 0
118  #define CURRENT_OBJCG_UPDATE_FLAG (1UL << CURRENT_OBJCG_UPDATE_BIT)
119  
120  static DEFINE_SPINLOCK(objcg_lock);
121  
122  bool mem_cgroup_kmem_disabled(void)
123  {
124  	return cgroup_memory_nokmem;
125  }
126  
127  static void obj_cgroup_uncharge_pages(struct obj_cgroup *objcg,
128  				      unsigned int nr_pages);
129  
130  static void obj_cgroup_release(struct percpu_ref *ref)
131  {
132  	struct obj_cgroup *objcg = container_of(ref, struct obj_cgroup, refcnt);
133  	unsigned int nr_bytes;
134  	unsigned int nr_pages;
135  	unsigned long flags;
136  
137  	/*
138  	 * At this point all allocated objects are freed, and
139  	 * objcg->nr_charged_bytes can't have an arbitrary byte value.
140  	 * However, it can be PAGE_SIZE or (x * PAGE_SIZE).
141  	 *
142  	 * The following sequence can lead to it:
143  	 * 1) CPU0: objcg == stock->cached_objcg
144  	 * 2) CPU1: we do a small allocation (e.g. 92 bytes),
145  	 *          PAGE_SIZE bytes are charged
146  	 * 3) CPU1: a process from another memcg is allocating something,
147  	 *          the stock is flushed,
148  	 *          objcg->nr_charged_bytes = PAGE_SIZE - 92
149  	 * 4) CPU0: we release this object,
150  	 *          92 bytes are added to stock->nr_bytes
151  	 * 5) CPU0: the stock is flushed,
152  	 *          92 bytes are added to objcg->nr_charged_bytes
153  	 *
154  	 * As a result, nr_charged_bytes == PAGE_SIZE.
155  	 * This page will be uncharged in obj_cgroup_release().
156  	 */
157  	nr_bytes = atomic_read(&objcg->nr_charged_bytes);
158  	WARN_ON_ONCE(nr_bytes & (PAGE_SIZE - 1));
159  	nr_pages = nr_bytes >> PAGE_SHIFT;
160  
161  	if (nr_pages)
162  		obj_cgroup_uncharge_pages(objcg, nr_pages);
163  
164  	spin_lock_irqsave(&objcg_lock, flags);
165  	list_del(&objcg->list);
166  	spin_unlock_irqrestore(&objcg_lock, flags);
167  
168  	percpu_ref_exit(ref);
169  	kfree_rcu(objcg, rcu);
170  }
171  
172  static struct obj_cgroup *obj_cgroup_alloc(void)
173  {
174  	struct obj_cgroup *objcg;
175  	int ret;
176  
177  	objcg = kzalloc(sizeof(struct obj_cgroup), GFP_KERNEL);
178  	if (!objcg)
179  		return NULL;
180  
181  	ret = percpu_ref_init(&objcg->refcnt, obj_cgroup_release, 0,
182  			      GFP_KERNEL);
183  	if (ret) {
184  		kfree(objcg);
185  		return NULL;
186  	}
187  	INIT_LIST_HEAD(&objcg->list);
188  	return objcg;
189  }
190  
191  static void memcg_reparent_objcgs(struct mem_cgroup *memcg,
192  				  struct mem_cgroup *parent)
193  {
194  	struct obj_cgroup *objcg, *iter;
195  
196  	objcg = rcu_replace_pointer(memcg->objcg, NULL, true);
197  
198  	spin_lock_irq(&objcg_lock);
199  
200  	/* 1) Ready to reparent active objcg. */
201  	list_add(&objcg->list, &memcg->objcg_list);
202  	/* 2) Reparent active objcg and already reparented objcgs to parent. */
203  	list_for_each_entry(iter, &memcg->objcg_list, list)
204  		WRITE_ONCE(iter->memcg, parent);
205  	/* 3) Move already reparented objcgs to the parent's list */
206  	list_splice(&memcg->objcg_list, &parent->objcg_list);
207  
208  	spin_unlock_irq(&objcg_lock);
209  
210  	percpu_ref_kill(&objcg->refcnt);
211  }
212  
213  /*
214   * A lot of the calls to the cache allocation functions are expected to be
215   * inlined by the compiler. Since the calls to memcg_slab_post_alloc_hook() are
216   * conditional on this static branch, we have to allow modules that do
217   * kmem_cache_alloc and the like to see this symbol as well.
218   */
219  DEFINE_STATIC_KEY_FALSE(memcg_kmem_online_key);
220  EXPORT_SYMBOL(memcg_kmem_online_key);
221  
222  DEFINE_STATIC_KEY_FALSE(memcg_bpf_enabled_key);
223  EXPORT_SYMBOL(memcg_bpf_enabled_key);
224  
225  /**
226   * mem_cgroup_css_from_folio - css of the memcg associated with a folio
227   * @folio: folio of interest
228   *
229   * If memcg is bound to the default hierarchy, css of the memcg associated
230   * with @folio is returned.  The returned css remains associated with @folio
231   * until it is released.
232   *
233   * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup
234   * is returned.
235   */
236  struct cgroup_subsys_state *mem_cgroup_css_from_folio(struct folio *folio)
237  {
238  	struct mem_cgroup *memcg = folio_memcg(folio);
239  
240  	if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
241  		memcg = root_mem_cgroup;
242  
243  	return &memcg->css;
244  }
245  
246  /**
247   * page_cgroup_ino - return inode number of the memcg a page is charged to
248   * @page: the page
249   *
250   * Look up the closest online ancestor of the memory cgroup @page is charged to
251   * and return its inode number or 0 if @page is not charged to any cgroup. It
252   * is safe to call this function without holding a reference to @page.
253   *
254   * Note, this function is inherently racy, because there is nothing to prevent
255   * the cgroup inode from getting torn down and potentially reallocated a moment
256   * after page_cgroup_ino() returns, so it should only be used by callers that
257   * do not care (such as procfs interfaces).
258   */
259  ino_t page_cgroup_ino(struct page *page)
260  {
261  	struct mem_cgroup *memcg;
262  	unsigned long ino = 0;
263  
264  	rcu_read_lock();
265  	/* page_folio() is racy here, but the entire function is racy anyway */
266  	memcg = folio_memcg_check(page_folio(page));
267  
268  	while (memcg && !(memcg->css.flags & CSS_ONLINE))
269  		memcg = parent_mem_cgroup(memcg);
270  	if (memcg)
271  		ino = cgroup_ino(memcg->css.cgroup);
272  	rcu_read_unlock();
273  	return ino;
274  }
275  
276  /* Subset of node_stat_item for memcg stats */
277  static const unsigned int memcg_node_stat_items[] = {
278  	NR_INACTIVE_ANON,
279  	NR_ACTIVE_ANON,
280  	NR_INACTIVE_FILE,
281  	NR_ACTIVE_FILE,
282  	NR_UNEVICTABLE,
283  	NR_SLAB_RECLAIMABLE_B,
284  	NR_SLAB_UNRECLAIMABLE_B,
285  	WORKINGSET_REFAULT_ANON,
286  	WORKINGSET_REFAULT_FILE,
287  	WORKINGSET_ACTIVATE_ANON,
288  	WORKINGSET_ACTIVATE_FILE,
289  	WORKINGSET_RESTORE_ANON,
290  	WORKINGSET_RESTORE_FILE,
291  	WORKINGSET_NODERECLAIM,
292  	NR_ANON_MAPPED,
293  	NR_FILE_MAPPED,
294  	NR_FILE_PAGES,
295  	NR_FILE_DIRTY,
296  	NR_WRITEBACK,
297  	NR_SHMEM,
298  	NR_SHMEM_THPS,
299  	NR_FILE_THPS,
300  	NR_ANON_THPS,
301  	NR_KERNEL_STACK_KB,
302  	NR_PAGETABLE,
303  	NR_SECONDARY_PAGETABLE,
304  #ifdef CONFIG_SWAP
305  	NR_SWAPCACHE,
306  #endif
307  #ifdef CONFIG_NUMA_BALANCING
308  	PGPROMOTE_SUCCESS,
309  #endif
310  	PGDEMOTE_KSWAPD,
311  	PGDEMOTE_DIRECT,
312  	PGDEMOTE_KHUGEPAGED,
313  };
314  
315  static const unsigned int memcg_stat_items[] = {
316  	MEMCG_SWAP,
317  	MEMCG_SOCK,
318  	MEMCG_PERCPU_B,
319  	MEMCG_VMALLOC,
320  	MEMCG_KMEM,
321  	MEMCG_ZSWAP_B,
322  	MEMCG_ZSWAPPED,
323  };
324  
325  #define NR_MEMCG_NODE_STAT_ITEMS ARRAY_SIZE(memcg_node_stat_items)
326  #define MEMCG_VMSTAT_SIZE (NR_MEMCG_NODE_STAT_ITEMS + \
327  			   ARRAY_SIZE(memcg_stat_items))
328  #define BAD_STAT_IDX(index) ((u32)(index) >= U8_MAX)
329  static u8 mem_cgroup_stats_index[MEMCG_NR_STAT] __read_mostly;
330  
331  static void init_memcg_stats(void)
332  {
333  	u8 i, j = 0;
334  
335  	BUILD_BUG_ON(MEMCG_NR_STAT >= U8_MAX);
336  
337  	memset(mem_cgroup_stats_index, U8_MAX, sizeof(mem_cgroup_stats_index));
338  
339  	for (i = 0; i < NR_MEMCG_NODE_STAT_ITEMS; ++i, ++j)
340  		mem_cgroup_stats_index[memcg_node_stat_items[i]] = j;
341  
342  	for (i = 0; i < ARRAY_SIZE(memcg_stat_items); ++i, ++j)
343  		mem_cgroup_stats_index[memcg_stat_items[i]] = j;
344  }
345  
346  static inline int memcg_stats_index(int idx)
347  {
348  	return mem_cgroup_stats_index[idx];
349  }
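/*
 * Illustrative sketch of the dense index mapping built above (assuming the
 * array order shown in memcg_node_stat_items[]):
 *
 *	memcg_stats_index(NR_INACTIVE_ANON) == 0
 *	memcg_stats_index(NR_ACTIVE_ANON)   == 1
 *	...
 *
 * Any item absent from memcg_node_stat_items[] and memcg_stat_items[] maps to
 * U8_MAX, which readers reject via BAD_STAT_IDX() before touching any array.
 */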
350  
351  struct lruvec_stats_percpu {
352  	/* Local (CPU and cgroup) state */
353  	long state[NR_MEMCG_NODE_STAT_ITEMS];
354  
355  	/* Delta calculation for lockless upward propagation */
356  	long state_prev[NR_MEMCG_NODE_STAT_ITEMS];
357  };
358  
359  struct lruvec_stats {
360  	/* Aggregated (CPU and subtree) state */
361  	long state[NR_MEMCG_NODE_STAT_ITEMS];
362  
363  	/* Non-hierarchical (CPU aggregated) state */
364  	long state_local[NR_MEMCG_NODE_STAT_ITEMS];
365  
366  	/* Pending child counts during tree propagation */
367  	long state_pending[NR_MEMCG_NODE_STAT_ITEMS];
368  };
369  
370  unsigned long lruvec_page_state(struct lruvec *lruvec, enum node_stat_item idx)
371  {
372  	struct mem_cgroup_per_node *pn;
373  	long x;
374  	int i;
375  
376  	if (mem_cgroup_disabled())
377  		return node_page_state(lruvec_pgdat(lruvec), idx);
378  
379  	i = memcg_stats_index(idx);
380  	if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
381  		return 0;
382  
383  	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
384  	x = READ_ONCE(pn->lruvec_stats->state[i]);
385  #ifdef CONFIG_SMP
386  	if (x < 0)
387  		x = 0;
388  #endif
389  	return x;
390  }
391  
392  unsigned long lruvec_page_state_local(struct lruvec *lruvec,
393  				      enum node_stat_item idx)
394  {
395  	struct mem_cgroup_per_node *pn;
396  	long x;
397  	int i;
398  
399  	if (mem_cgroup_disabled())
400  		return node_page_state(lruvec_pgdat(lruvec), idx);
401  
402  	i = memcg_stats_index(idx);
403  	if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
404  		return 0;
405  
406  	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
407  	x = READ_ONCE(pn->lruvec_stats->state_local[i]);
408  #ifdef CONFIG_SMP
409  	if (x < 0)
410  		x = 0;
411  #endif
412  	return x;
413  }
414  
415  /* Subset of vm_event_item to report for memcg event stats */
416  static const unsigned int memcg_vm_event_stat[] = {
417  #ifdef CONFIG_MEMCG_V1
418  	PGPGIN,
419  	PGPGOUT,
420  #endif
421  	PGSCAN_KSWAPD,
422  	PGSCAN_DIRECT,
423  	PGSCAN_KHUGEPAGED,
424  	PGSTEAL_KSWAPD,
425  	PGSTEAL_DIRECT,
426  	PGSTEAL_KHUGEPAGED,
427  	PGFAULT,
428  	PGMAJFAULT,
429  	PGREFILL,
430  	PGACTIVATE,
431  	PGDEACTIVATE,
432  	PGLAZYFREE,
433  	PGLAZYFREED,
434  #ifdef CONFIG_SWAP
435  	SWPIN_ZERO,
436  	SWPOUT_ZERO,
437  #endif
438  #ifdef CONFIG_ZSWAP
439  	ZSWPIN,
440  	ZSWPOUT,
441  	ZSWPWB,
442  #endif
443  #ifdef CONFIG_TRANSPARENT_HUGEPAGE
444  	THP_FAULT_ALLOC,
445  	THP_COLLAPSE_ALLOC,
446  	THP_SWPOUT,
447  	THP_SWPOUT_FALLBACK,
448  #endif
449  #ifdef CONFIG_NUMA_BALANCING
450  	NUMA_PAGE_MIGRATE,
451  	NUMA_PTE_UPDATES,
452  	NUMA_HINT_FAULTS,
453  #endif
454  };
455  
456  #define NR_MEMCG_EVENTS ARRAY_SIZE(memcg_vm_event_stat)
457  static u8 mem_cgroup_events_index[NR_VM_EVENT_ITEMS] __read_mostly;
458  
459  static void init_memcg_events(void)
460  {
461  	u8 i;
462  
463  	BUILD_BUG_ON(NR_VM_EVENT_ITEMS >= U8_MAX);
464  
465  	memset(mem_cgroup_events_index, U8_MAX,
466  	       sizeof(mem_cgroup_events_index));
467  
468  	for (i = 0; i < NR_MEMCG_EVENTS; ++i)
469  		mem_cgroup_events_index[memcg_vm_event_stat[i]] = i;
470  }
471  
472  static inline int memcg_events_index(enum vm_event_item idx)
473  {
474  	return mem_cgroup_events_index[idx];
475  }
476  
477  struct memcg_vmstats_percpu {
478  	/* Stats updates since the last flush */
479  	unsigned int			stats_updates;
480  
481  	/* Cached pointers for fast iteration in memcg_rstat_updated() */
482  	struct memcg_vmstats_percpu	*parent;
483  	struct memcg_vmstats		*vmstats;
484  
485  	/* The above should fit a single cacheline for memcg_rstat_updated() */
486  
487  	/* Local (CPU and cgroup) page state & events */
488  	long			state[MEMCG_VMSTAT_SIZE];
489  	unsigned long		events[NR_MEMCG_EVENTS];
490  
491  	/* Delta calculation for lockless upward propagation */
492  	long			state_prev[MEMCG_VMSTAT_SIZE];
493  	unsigned long		events_prev[NR_MEMCG_EVENTS];
494  } ____cacheline_aligned;
495  
496  struct memcg_vmstats {
497  	/* Aggregated (CPU and subtree) page state & events */
498  	long			state[MEMCG_VMSTAT_SIZE];
499  	unsigned long		events[NR_MEMCG_EVENTS];
500  
501  	/* Non-hierarchical (CPU aggregated) page state & events */
502  	long			state_local[MEMCG_VMSTAT_SIZE];
503  	unsigned long		events_local[NR_MEMCG_EVENTS];
504  
505  	/* Pending child counts during tree propagation */
506  	long			state_pending[MEMCG_VMSTAT_SIZE];
507  	unsigned long		events_pending[NR_MEMCG_EVENTS];
508  
509  	/* Stats updates since the last flush */
510  	atomic64_t		stats_updates;
511  };
512  
513  /*
514   * memcg and lruvec stats flushing
515   *
516   * Many codepaths leading to stats update or read are performance sensitive and
517   * adding stats flushing in such codepaths is not desirable. So, to optimize the
518   * flushing the kernel does:
519   *
520   * 1) Periodically and asynchronously flush the stats every 2 seconds so that
521   *    the rstat update tree does not grow unbounded.
522   *
523   * 2) Flush the stats synchronously on the reader side only when there are more
524   *    than (MEMCG_CHARGE_BATCH * nr_cpus) update events. This optimization can
525   *    leave the stats out of sync by at most (MEMCG_CHARGE_BATCH * nr_cpus)
526   *    events, but only for 2 seconds due to (1).
527   */
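/*
 * Illustrative arithmetic for the reader-side threshold in (2), assuming
 * MEMCG_CHARGE_BATCH == 64: on a machine with 8 online CPUs a synchronous
 * flush is triggered only once more than 64 * 8 = 512 update events have
 * accumulated; smaller deltas are left to the periodic worker below.
 */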
528  static void flush_memcg_stats_dwork(struct work_struct *w);
529  static DECLARE_DEFERRABLE_WORK(stats_flush_dwork, flush_memcg_stats_dwork);
530  static u64 flush_last_time;
531  
532  #define FLUSH_TIME (2UL*HZ)
533  
534  /*
535   * Accessors to ensure that preemption is disabled on PREEMPT_RT, because
536   * holding an acquired spinlock_t lock does not imply disabled preemption there.
537   * These functions are never used in hardirq context on PREEMPT_RT and therefore
538   * disabling preemption is sufficient.
539   */
540  static void memcg_stats_lock(void)
541  {
542  	preempt_disable_nested();
543  	VM_WARN_ON_IRQS_ENABLED();
544  }
545  
546  static void __memcg_stats_lock(void)
547  {
548  	preempt_disable_nested();
549  }
550  
551  static void memcg_stats_unlock(void)
552  {
553  	preempt_enable_nested();
554  }
555  
556  
557  static bool memcg_vmstats_needs_flush(struct memcg_vmstats *vmstats)
558  {
559  	return atomic64_read(&vmstats->stats_updates) >
560  		MEMCG_CHARGE_BATCH * num_online_cpus();
561  }
562  
563  static inline void memcg_rstat_updated(struct mem_cgroup *memcg, int val)
564  {
565  	struct memcg_vmstats_percpu *statc;
566  	int cpu = smp_processor_id();
567  	unsigned int stats_updates;
568  
569  	if (!val)
570  		return;
571  
572  	cgroup_rstat_updated(memcg->css.cgroup, cpu);
573  	statc = this_cpu_ptr(memcg->vmstats_percpu);
574  	for (; statc; statc = statc->parent) {
575  		stats_updates = READ_ONCE(statc->stats_updates) + abs(val);
576  		WRITE_ONCE(statc->stats_updates, stats_updates);
577  		if (stats_updates < MEMCG_CHARGE_BATCH)
578  			continue;
579  
580  		/*
581  		 * If @memcg is already flush-able, increasing stats_updates is
582  		 * redundant. Avoid the overhead of the atomic update.
583  		 */
584  		if (!memcg_vmstats_needs_flush(statc->vmstats))
585  			atomic64_add(stats_updates,
586  				     &statc->vmstats->stats_updates);
587  		WRITE_ONCE(statc->stats_updates, 0);
588  	}
589  }
590  
591  static void do_flush_stats(struct mem_cgroup *memcg)
592  {
593  	if (mem_cgroup_is_root(memcg))
594  		WRITE_ONCE(flush_last_time, jiffies_64);
595  
596  	cgroup_rstat_flush(memcg->css.cgroup);
597  }
598  
599  /*
600   * mem_cgroup_flush_stats - flush the stats of a memory cgroup subtree
601   * @memcg: root of the subtree to flush
602   *
603   * Flushing is serialized by the underlying global rstat lock. There is also a
604   * minimum amount of work to be done even if there are no stat updates to flush.
605   * Hence, we only flush the stats if the updates delta exceeds a threshold. This
606   * avoids unnecessary work and contention on the underlying lock.
607   */
608  void mem_cgroup_flush_stats(struct mem_cgroup *memcg)
609  {
610  	if (mem_cgroup_disabled())
611  		return;
612  
613  	if (!memcg)
614  		memcg = root_mem_cgroup;
615  
616  	if (memcg_vmstats_needs_flush(memcg->vmstats))
617  		do_flush_stats(memcg);
618  }
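/*
 * Minimal reader-side usage sketch: flush first so the aggregated counters
 * are at most one threshold's worth of updates stale, then read.
 *
 *	mem_cgroup_flush_stats(memcg);
 *	nr_file = memcg_page_state(memcg, NR_FILE_PAGES);
 *
 * memcg_stat_format() below follows this pattern.
 */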
619  
620  void mem_cgroup_flush_stats_ratelimited(struct mem_cgroup *memcg)
621  {
622  	/* Only flush if the periodic flusher is one full cycle late */
623  	if (time_after64(jiffies_64, READ_ONCE(flush_last_time) + 2*FLUSH_TIME))
624  		mem_cgroup_flush_stats(memcg);
625  }
626  
627  static void flush_memcg_stats_dwork(struct work_struct *w)
628  {
629  	/*
630  	 * Deliberately ignore memcg_vmstats_needs_flush() here so that flushing
631  	 * in latency-sensitive paths is as cheap as possible.
632  	 */
633  	do_flush_stats(root_mem_cgroup);
634  	queue_delayed_work(system_unbound_wq, &stats_flush_dwork, FLUSH_TIME);
635  }
636  
637  unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
638  {
639  	long x;
640  	int i = memcg_stats_index(idx);
641  
642  	if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
643  		return 0;
644  
645  	x = READ_ONCE(memcg->vmstats->state[i]);
646  #ifdef CONFIG_SMP
647  	if (x < 0)
648  		x = 0;
649  #endif
650  	return x;
651  }
652  
653  static int memcg_page_state_unit(int item);
654  
655  /*
656   * Normalize the value passed into memcg_rstat_updated() to be in pages. Round
657   * up non-zero sub-page updates to 1 page, since updates of zero are ignored.
658   */
659  static int memcg_state_val_in_pages(int idx, int val)
660  {
661  	int unit = memcg_page_state_unit(idx);
662  
663  	if (!val || unit == PAGE_SIZE)
664  		return val;
665  	else
666  		return max(val * unit / PAGE_SIZE, 1UL);
667  }
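/*
 * Worked examples, assuming PAGE_SIZE == 4096:
 *  - NR_SLAB_RECLAIMABLE_B has a unit of one byte, so a 192-byte update is
 *    rounded up to max(192 / 4096, 1) = 1 page of update weight.
 *  - NR_KERNEL_STACK_KB has a unit of SZ_1K, so an update of 16 (KB) becomes
 *    16 * 1024 / 4096 = 4 pages.
 *  - Items whose unit is already PAGE_SIZE pass through unchanged.
 */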
668  
669  /**
670   * __mod_memcg_state - update cgroup memory statistics
671   * @memcg: the memory cgroup
672   * @idx: the stat item - can be enum memcg_stat_item or enum node_stat_item
673   * @val: delta to add to the counter, can be negative
674   */
675  void __mod_memcg_state(struct mem_cgroup *memcg, enum memcg_stat_item idx,
676  		       int val)
677  {
678  	int i = memcg_stats_index(idx);
679  
680  	if (mem_cgroup_disabled())
681  		return;
682  
683  	if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
684  		return;
685  
686  	__this_cpu_add(memcg->vmstats_percpu->state[i], val);
687  	memcg_rstat_updated(memcg, memcg_state_val_in_pages(idx, val));
688  }
689  
690  /* idx can be of type enum memcg_stat_item or node_stat_item. */
691  unsigned long memcg_page_state_local(struct mem_cgroup *memcg, int idx)
692  {
693  	long x;
694  	int i = memcg_stats_index(idx);
695  
696  	if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
697  		return 0;
698  
699  	x = READ_ONCE(memcg->vmstats->state_local[i]);
700  #ifdef CONFIG_SMP
701  	if (x < 0)
702  		x = 0;
703  #endif
704  	return x;
705  }
706  
707  static void __mod_memcg_lruvec_state(struct lruvec *lruvec,
708  				     enum node_stat_item idx,
709  				     int val)
710  {
711  	struct mem_cgroup_per_node *pn;
712  	struct mem_cgroup *memcg;
713  	int i = memcg_stats_index(idx);
714  
715  	if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
716  		return;
717  
718  	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
719  	memcg = pn->memcg;
720  
721  	/*
722  	 * Callers from rmap rely on disabled preemption because they never
723  	 * update their counters from interrupt context. For these
724  	 * counters we check that the update is never performed from an
725  	 * interrupt context, while other callers need to have interrupts disabled.
726  	 */
727  	__memcg_stats_lock();
728  	if (IS_ENABLED(CONFIG_DEBUG_VM)) {
729  		switch (idx) {
730  		case NR_ANON_MAPPED:
731  		case NR_FILE_MAPPED:
732  		case NR_ANON_THPS:
733  			WARN_ON_ONCE(!in_task());
734  			break;
735  		default:
736  			VM_WARN_ON_IRQS_ENABLED();
737  		}
738  	}
739  
740  	/* Update memcg */
741  	__this_cpu_add(memcg->vmstats_percpu->state[i], val);
742  
743  	/* Update lruvec */
744  	__this_cpu_add(pn->lruvec_stats_percpu->state[i], val);
745  
746  	memcg_rstat_updated(memcg, memcg_state_val_in_pages(idx, val));
747  	memcg_stats_unlock();
748  }
749  
750  /**
751   * __mod_lruvec_state - update lruvec memory statistics
752   * @lruvec: the lruvec
753   * @idx: the stat item
754   * @val: delta to add to the counter, can be negative
755   *
756   * The lruvec is the intersection of the NUMA node and a cgroup. This
757   * function updates all three counters that are affected by a
758   * change of state at this level: per-node, per-cgroup, per-lruvec.
759   */
760  void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
761  			int val)
762  {
763  	/* Update node */
764  	__mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
765  
766  	/* Update memcg and lruvec */
767  	if (!mem_cgroup_disabled())
768  		__mod_memcg_lruvec_state(lruvec, idx, val);
769  }
770  
771  void __lruvec_stat_mod_folio(struct folio *folio, enum node_stat_item idx,
772  			     int val)
773  {
774  	struct mem_cgroup *memcg;
775  	pg_data_t *pgdat = folio_pgdat(folio);
776  	struct lruvec *lruvec;
777  
778  	rcu_read_lock();
779  	memcg = folio_memcg(folio);
780  	/* Untracked pages have no memcg, no lruvec. Update only the node */
781  	if (!memcg) {
782  		rcu_read_unlock();
783  		__mod_node_page_state(pgdat, idx, val);
784  		return;
785  	}
786  
787  	lruvec = mem_cgroup_lruvec(memcg, pgdat);
788  	__mod_lruvec_state(lruvec, idx, val);
789  	rcu_read_unlock();
790  }
791  EXPORT_SYMBOL(__lruvec_stat_mod_folio);
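/*
 * Minimal usage sketch: dirtying a file folio bumps the node, memcg and
 * lruvec counters in one call, provided the caller satisfies the context
 * rules checked in __mod_memcg_lruvec_state():
 *
 *	__lruvec_stat_mod_folio(folio, NR_FILE_DIRTY, folio_nr_pages(folio));
 *
 * and the same amount is subtracted again when the folio is cleaned.
 */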
792  
793  void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val)
794  {
795  	pg_data_t *pgdat = page_pgdat(virt_to_page(p));
796  	struct mem_cgroup *memcg;
797  	struct lruvec *lruvec;
798  
799  	rcu_read_lock();
800  	memcg = mem_cgroup_from_slab_obj(p);
801  
802  	/*
803  	 * Untracked pages have no memcg, no lruvec. Update only the
804  	 * node. If the slab objects have been reparented to the root
805  	 * memcg, the per-memcg vmstats still need to be updated when the
806  	 * slab object is freed, to keep them correct for the root memcg.
807  	 */
808  	if (!memcg) {
809  		__mod_node_page_state(pgdat, idx, val);
810  	} else {
811  		lruvec = mem_cgroup_lruvec(memcg, pgdat);
812  		__mod_lruvec_state(lruvec, idx, val);
813  	}
814  	rcu_read_unlock();
815  }
816  
817  /**
818   * __count_memcg_events - account VM events in a cgroup
819   * @memcg: the memory cgroup
820   * @idx: the event item
821   * @count: the number of events that occurred
822   */
823  void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
824  			  unsigned long count)
825  {
826  	int i = memcg_events_index(idx);
827  
828  	if (mem_cgroup_disabled())
829  		return;
830  
831  	if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
832  		return;
833  
834  	memcg_stats_lock();
835  	__this_cpu_add(memcg->vmstats_percpu->events[i], count);
836  	memcg_rstat_updated(memcg, count);
837  	memcg_stats_unlock();
838  }
839  
840  unsigned long memcg_events(struct mem_cgroup *memcg, int event)
841  {
842  	int i = memcg_events_index(event);
843  
844  	if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, event))
845  		return 0;
846  
847  	return READ_ONCE(memcg->vmstats->events[i]);
848  }
849  
850  unsigned long memcg_events_local(struct mem_cgroup *memcg, int event)
851  {
852  	int i = memcg_events_index(event);
853  
854  	if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, event))
855  		return 0;
856  
857  	return READ_ONCE(memcg->vmstats->events_local[i]);
858  }
859  
860  struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
861  {
862  	/*
863  	 * mm_update_next_owner() may clear mm->owner to NULL
864  	 * if it races with swapoff, page migration, etc.
865  	 * So this can be called with p == NULL.
866  	 */
867  	if (unlikely(!p))
868  		return NULL;
869  
870  	return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
871  }
872  EXPORT_SYMBOL(mem_cgroup_from_task);
873  
874  static __always_inline struct mem_cgroup *active_memcg(void)
875  {
876  	if (!in_task())
877  		return this_cpu_read(int_active_memcg);
878  	else
879  		return current->active_memcg;
880  }
881  
882  /**
883   * get_mem_cgroup_from_mm: Obtain a reference on given mm_struct's memcg.
884   * @mm: mm from which memcg should be extracted. It can be NULL.
885   *
886   * Obtain a reference on the memcg associated with @mm and return it if
887   * successful. If @mm is NULL, the memcg is chosen as follows:
888   * 1) The active memcg, if set.
889   * 2) current->mm->memcg, if available
890   * 3) root memcg
891   * If mem_cgroup is disabled, NULL is returned.
892   */
893  struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
894  {
895  	struct mem_cgroup *memcg;
896  
897  	if (mem_cgroup_disabled())
898  		return NULL;
899  
900  	/*
901  	 * Page cache insertions can happen without an
902  	 * actual mm context, e.g. during disk probing
903  	 * on boot, loopback IO, acct() writes etc.
904  	 *
905  	 * No need to css_get on root memcg as the reference
906  	 * counting is disabled on the root level in the
907  	 * cgroup core. See CSS_NO_REF.
908  	 */
909  	if (unlikely(!mm)) {
910  		memcg = active_memcg();
911  		if (unlikely(memcg)) {
912  			/* remote memcg must hold a ref */
913  			css_get(&memcg->css);
914  			return memcg;
915  		}
916  		mm = current->mm;
917  		if (unlikely(!mm))
918  			return root_mem_cgroup;
919  	}
920  
921  	rcu_read_lock();
922  	do {
923  		memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
924  		if (unlikely(!memcg))
925  			memcg = root_mem_cgroup;
926  	} while (!css_tryget(&memcg->css));
927  	rcu_read_unlock();
928  	return memcg;
929  }
930  EXPORT_SYMBOL(get_mem_cgroup_from_mm);
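/*
 * Minimal get/put pairing sketch: the returned css reference must be dropped
 * when the caller is done; NULL is returned only when the memory controller
 * is disabled.
 *
 *	memcg = get_mem_cgroup_from_mm(current->mm);
 *	if (memcg) {
 *		...
 *		css_put(&memcg->css);
 *	}
 */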
931  
932  /**
933   * get_mem_cgroup_from_current - Obtain a reference on current task's memcg.
934   */
935  struct mem_cgroup *get_mem_cgroup_from_current(void)
936  {
937  	struct mem_cgroup *memcg;
938  
939  	if (mem_cgroup_disabled())
940  		return NULL;
941  
942  again:
943  	rcu_read_lock();
944  	memcg = mem_cgroup_from_task(current);
945  	if (!css_tryget(&memcg->css)) {
946  		rcu_read_unlock();
947  		goto again;
948  	}
949  	rcu_read_unlock();
950  	return memcg;
951  }
952  
953  /**
954   * get_mem_cgroup_from_folio - Obtain a reference on a given folio's memcg.
955   * @folio: folio from which memcg should be extracted.
956   */
957  struct mem_cgroup *get_mem_cgroup_from_folio(struct folio *folio)
958  {
959  	struct mem_cgroup *memcg = folio_memcg(folio);
960  
961  	if (mem_cgroup_disabled())
962  		return NULL;
963  
964  	rcu_read_lock();
965  	if (!memcg || WARN_ON_ONCE(!css_tryget(&memcg->css)))
966  		memcg = root_mem_cgroup;
967  	rcu_read_unlock();
968  	return memcg;
969  }
970  
971  /**
972   * mem_cgroup_iter - iterate over memory cgroup hierarchy
973   * @root: hierarchy root
974   * @prev: previously returned memcg, NULL on first invocation
975   * @reclaim: cookie for shared reclaim walks, NULL for full walks
976   *
977   * Returns references to children of the hierarchy below @root, or
978   * @root itself, or %NULL after a full round-trip.
979   *
980   * Caller must pass the return value in @prev on subsequent
981   * invocations for reference counting, or use mem_cgroup_iter_break()
982   * to cancel a hierarchy walk before the round-trip is complete.
983   *
984   * Reclaimers can specify a node in @reclaim to divide up the memcgs
985   * in the hierarchy among all concurrent reclaimers operating on the
986   * same node.
987   */
988  struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
989  				   struct mem_cgroup *prev,
990  				   struct mem_cgroup_reclaim_cookie *reclaim)
991  {
992  	struct mem_cgroup_reclaim_iter *iter;
993  	struct cgroup_subsys_state *css;
994  	struct mem_cgroup *pos;
995  	struct mem_cgroup *next;
996  
997  	if (mem_cgroup_disabled())
998  		return NULL;
999  
1000  	if (!root)
1001  		root = root_mem_cgroup;
1002  
1003  	rcu_read_lock();
1004  restart:
1005  	next = NULL;
1006  
1007  	if (reclaim) {
1008  		int gen;
1009  		int nid = reclaim->pgdat->node_id;
1010  
1011  		iter = &root->nodeinfo[nid]->iter;
1012  		gen = atomic_read(&iter->generation);
1013  
1014  		/*
1015  		 * On start, join the current reclaim iteration cycle.
1016  		 * Exit when a concurrent walker completes it.
1017  		 */
1018  		if (!prev)
1019  			reclaim->generation = gen;
1020  		else if (reclaim->generation != gen)
1021  			goto out_unlock;
1022  
1023  		pos = READ_ONCE(iter->position);
1024  	} else
1025  		pos = prev;
1026  
1027  	css = pos ? &pos->css : NULL;
1028  
1029  	while ((css = css_next_descendant_pre(css, &root->css))) {
1030  		/*
1031  		 * Verify the css and acquire a reference.  The root
1032  		 * is provided by the caller, so we know it's alive
1033  		 * and kicking, and don't take an extra reference.
1034  		 */
1035  		if (css == &root->css || css_tryget(css))
1036  			break;
1037  	}
1038  
1039  	next = mem_cgroup_from_css(css);
1040  
1041  	if (reclaim) {
1042  		/*
1043  		 * The position could have already been updated by a competing
1044  		 * thread, so check that the value hasn't changed since we read
1045  		 * it to avoid reclaiming from the same cgroup twice.
1046  		 */
1047  		if (cmpxchg(&iter->position, pos, next) != pos) {
1048  			if (css && css != &root->css)
1049  				css_put(css);
1050  			goto restart;
1051  		}
1052  
1053  		if (!next) {
1054  			atomic_inc(&iter->generation);
1055  
1056  			/*
1057  			 * Reclaimers share the hierarchy walk, and a
1058  			 * new one might jump in right at the end of
1059  			 * the hierarchy - make sure they see at least
1060  			 * one group and restart from the beginning.
1061  			 */
1062  			if (!prev)
1063  				goto restart;
1064  		}
1065  	}
1066  
1067  out_unlock:
1068  	rcu_read_unlock();
1069  	if (prev && prev != root)
1070  		css_put(&prev->css);
1071  
1072  	return next;
1073  }
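/*
 * Canonical walk sketch over the subtree rooted at @root; visit() and ABORT
 * are placeholders for the caller's own logic.  A completed round-trip
 * returns NULL and needs no cleanup, while an early exit must call
 * mem_cgroup_iter_break() to drop the last reference:
 *
 *	struct mem_cgroup *memcg = mem_cgroup_iter(root, NULL, NULL);
 *
 *	while (memcg) {
 *		if (visit(memcg) == ABORT) {
 *			mem_cgroup_iter_break(root, memcg);
 *			break;
 *		}
 *		memcg = mem_cgroup_iter(root, memcg, NULL);
 *	}
 */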
1074  
1075  /**
1076   * mem_cgroup_iter_break - abort a hierarchy walk prematurely
1077   * @root: hierarchy root
1078   * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
1079   */
1080  void mem_cgroup_iter_break(struct mem_cgroup *root,
1081  			   struct mem_cgroup *prev)
1082  {
1083  	if (!root)
1084  		root = root_mem_cgroup;
1085  	if (prev && prev != root)
1086  		css_put(&prev->css);
1087  }
1088  
1089  static void __invalidate_reclaim_iterators(struct mem_cgroup *from,
1090  					struct mem_cgroup *dead_memcg)
1091  {
1092  	struct mem_cgroup_reclaim_iter *iter;
1093  	struct mem_cgroup_per_node *mz;
1094  	int nid;
1095  
1096  	for_each_node(nid) {
1097  		mz = from->nodeinfo[nid];
1098  		iter = &mz->iter;
1099  		cmpxchg(&iter->position, dead_memcg, NULL);
1100  	}
1101  }
1102  
1103  static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
1104  {
1105  	struct mem_cgroup *memcg = dead_memcg;
1106  	struct mem_cgroup *last;
1107  
1108  	do {
1109  		__invalidate_reclaim_iterators(memcg, dead_memcg);
1110  		last = memcg;
1111  	} while ((memcg = parent_mem_cgroup(memcg)));
1112  
1113  	/*
1114  	 * When cgroup1 non-hierarchy mode is used,
1115  	 * parent_mem_cgroup() does not walk all the way up to the
1116  	 * cgroup root (root_mem_cgroup). So we have to handle
1117  	 * dead_memcg from cgroup root separately.
1118  	 */
1119  	if (!mem_cgroup_is_root(last))
1120  		__invalidate_reclaim_iterators(root_mem_cgroup,
1121  						dead_memcg);
1122  }
1123  
1124  /**
1125   * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy
1126   * @memcg: hierarchy root
1127   * @fn: function to call for each task
1128   * @arg: argument passed to @fn
1129   *
1130   * This function iterates over tasks attached to @memcg or to any of its
1131   * descendants and calls @fn for each task. If @fn returns a non-zero
1132   * value, the iteration stops early. Otherwise, it iterates over
1133   * all tasks.
1134   *
1135   * This function must not be called for the root memory cgroup.
1136   */
1137  void mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
1138  			   int (*fn)(struct task_struct *, void *), void *arg)
1139  {
1140  	struct mem_cgroup *iter;
1141  	int ret = 0;
1142  
1143  	BUG_ON(mem_cgroup_is_root(memcg));
1144  
1145  	for_each_mem_cgroup_tree(iter, memcg) {
1146  		struct css_task_iter it;
1147  		struct task_struct *task;
1148  
1149  		css_task_iter_start(&iter->css, CSS_TASK_ITER_PROCS, &it);
1150  		while (!ret && (task = css_task_iter_next(&it)))
1151  			ret = fn(task, arg);
1152  		css_task_iter_end(&it);
1153  		if (ret) {
1154  			mem_cgroup_iter_break(memcg, iter);
1155  			break;
1156  		}
1157  	}
1158  }
1159  
1160  #ifdef CONFIG_DEBUG_VM
1161  void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
1162  {
1163  	struct mem_cgroup *memcg;
1164  
1165  	if (mem_cgroup_disabled())
1166  		return;
1167  
1168  	memcg = folio_memcg(folio);
1169  
1170  	if (!memcg)
1171  		VM_BUG_ON_FOLIO(!mem_cgroup_is_root(lruvec_memcg(lruvec)), folio);
1172  	else
1173  		VM_BUG_ON_FOLIO(lruvec_memcg(lruvec) != memcg, folio);
1174  }
1175  #endif
1176  
1177  /**
1178   * folio_lruvec_lock - Lock the lruvec for a folio.
1179   * @folio: Pointer to the folio.
1180   *
1181   * These functions are safe to use under any of the following conditions:
1182   * - folio locked
1183   * - folio_test_lru false
1184   * - folio_memcg_lock()
1185   * - folio frozen (refcount of 0)
1186   *
1187   * Return: The lruvec this folio is on with its lock held.
1188   */
1189  struct lruvec *folio_lruvec_lock(struct folio *folio)
1190  {
1191  	struct lruvec *lruvec = folio_lruvec(folio);
1192  
1193  	spin_lock(&lruvec->lru_lock);
1194  	lruvec_memcg_debug(lruvec, folio);
1195  
1196  	return lruvec;
1197  }
1198  
1199  /**
1200   * folio_lruvec_lock_irq - Lock the lruvec for a folio.
1201   * @folio: Pointer to the folio.
1202   *
1203   * These functions are safe to use under any of the following conditions:
1204   * - folio locked
1205   * - folio_test_lru false
1206   * - folio_memcg_lock()
1207   * - folio frozen (refcount of 0)
1208   *
1209   * Return: The lruvec this folio is on with its lock held and interrupts
1210   * disabled.
1211   */
1212  struct lruvec *folio_lruvec_lock_irq(struct folio *folio)
1213  {
1214  	struct lruvec *lruvec = folio_lruvec(folio);
1215  
1216  	spin_lock_irq(&lruvec->lru_lock);
1217  	lruvec_memcg_debug(lruvec, folio);
1218  
1219  	return lruvec;
1220  }
1221  
1222  /**
1223   * folio_lruvec_lock_irqsave - Lock the lruvec for a folio.
1224   * @folio: Pointer to the folio.
1225   * @flags: Pointer to irqsave flags.
1226   *
1227   * These functions are safe to use under any of the following conditions:
1228   * - folio locked
1229   * - folio_test_lru false
1230   * - folio_memcg_lock()
1231   * - folio frozen (refcount of 0)
1232   *
1233   * Return: The lruvec this folio is on with its lock held and interrupts
1234   * disabled.
1235   */
1236  struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
1237  		unsigned long *flags)
1238  {
1239  	struct lruvec *lruvec = folio_lruvec(folio);
1240  
1241  	spin_lock_irqsave(&lruvec->lru_lock, *flags);
1242  	lruvec_memcg_debug(lruvec, folio);
1243  
1244  	return lruvec;
1245  }
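/*
 * Lock/unlock pairing sketch: the irqsave variant saves the interrupt state
 * in *@flags, so the caller unlocks with the matching irqrestore on the
 * lruvec's lru_lock:
 *
 *	unsigned long flags;
 *	struct lruvec *lruvec = folio_lruvec_lock_irqsave(folio, &flags);
 *
 *	... manipulate the folio's LRU state ...
 *
 *	spin_unlock_irqrestore(&lruvec->lru_lock, flags);
 */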
1246  
1247  /**
1248   * mem_cgroup_update_lru_size - account for adding or removing an lru page
1249   * @lruvec: mem_cgroup per zone lru vector
1250   * @lru: index of lru list the page is sitting on
1251   * @zid: zone id of the accounted pages
1252   * @nr_pages: positive when adding or negative when removing
1253   *
1254   * This function must be called under lru_lock, just before a page is added
1255   * to or just after a page is removed from an lru list.
1256   */
1257  void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
1258  				int zid, int nr_pages)
1259  {
1260  	struct mem_cgroup_per_node *mz;
1261  	unsigned long *lru_size;
1262  	long size;
1263  
1264  	if (mem_cgroup_disabled())
1265  		return;
1266  
1267  	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
1268  	lru_size = &mz->lru_zone_size[zid][lru];
1269  
1270  	if (nr_pages < 0)
1271  		*lru_size += nr_pages;
1272  
1273  	size = *lru_size;
1274  	if (WARN_ONCE(size < 0,
1275  		"%s(%p, %d, %d): lru_size %ld\n",
1276  		__func__, lruvec, lru, nr_pages, size)) {
1277  		VM_BUG_ON(1);
1278  		*lru_size = 0;
1279  	}
1280  
1281  	if (nr_pages > 0)
1282  		*lru_size += nr_pages;
1283  }
1284  
1285  /**
1286   * mem_cgroup_margin - calculate chargeable space of a memory cgroup
1287   * @memcg: the memory cgroup
1288   *
1289   * Returns the maximum amount of memory @memcg can be charged with, in
1290   * pages.
1291   */
1292  static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
1293  {
1294  	unsigned long margin = 0;
1295  	unsigned long count;
1296  	unsigned long limit;
1297  
1298  	count = page_counter_read(&memcg->memory);
1299  	limit = READ_ONCE(memcg->memory.max);
1300  	if (count < limit)
1301  		margin = limit - count;
1302  
1303  	if (do_memsw_account()) {
1304  		count = page_counter_read(&memcg->memsw);
1305  		limit = READ_ONCE(memcg->memsw.max);
1306  		if (count < limit)
1307  			margin = min(margin, limit - count);
1308  		else
1309  			margin = 0;
1310  	}
1311  
1312  	return margin;
1313  }
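/*
 * Worked example: with memory.max == 100 pages and a usage of 80 pages the
 * margin is 20 pages.  If memsw accounting is active with memsw.max == 120
 * and a memsw usage of 115, the margin shrinks to min(20, 5) = 5 pages; a
 * memsw usage at or above its limit forces the margin to 0 regardless of the
 * plain memory headroom.
 */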
1314  
1315  struct memory_stat {
1316  	const char *name;
1317  	unsigned int idx;
1318  };
1319  
1320  static const struct memory_stat memory_stats[] = {
1321  	{ "anon",			NR_ANON_MAPPED			},
1322  	{ "file",			NR_FILE_PAGES			},
1323  	{ "kernel",			MEMCG_KMEM			},
1324  	{ "kernel_stack",		NR_KERNEL_STACK_KB		},
1325  	{ "pagetables",			NR_PAGETABLE			},
1326  	{ "sec_pagetables",		NR_SECONDARY_PAGETABLE		},
1327  	{ "percpu",			MEMCG_PERCPU_B			},
1328  	{ "sock",			MEMCG_SOCK			},
1329  	{ "vmalloc",			MEMCG_VMALLOC			},
1330  	{ "shmem",			NR_SHMEM			},
1331  #ifdef CONFIG_ZSWAP
1332  	{ "zswap",			MEMCG_ZSWAP_B			},
1333  	{ "zswapped",			MEMCG_ZSWAPPED			},
1334  #endif
1335  	{ "file_mapped",		NR_FILE_MAPPED			},
1336  	{ "file_dirty",			NR_FILE_DIRTY			},
1337  	{ "file_writeback",		NR_WRITEBACK			},
1338  #ifdef CONFIG_SWAP
1339  	{ "swapcached",			NR_SWAPCACHE			},
1340  #endif
1341  #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1342  	{ "anon_thp",			NR_ANON_THPS			},
1343  	{ "file_thp",			NR_FILE_THPS			},
1344  	{ "shmem_thp",			NR_SHMEM_THPS			},
1345  #endif
1346  	{ "inactive_anon",		NR_INACTIVE_ANON		},
1347  	{ "active_anon",		NR_ACTIVE_ANON			},
1348  	{ "inactive_file",		NR_INACTIVE_FILE		},
1349  	{ "active_file",		NR_ACTIVE_FILE			},
1350  	{ "unevictable",		NR_UNEVICTABLE			},
1351  	{ "slab_reclaimable",		NR_SLAB_RECLAIMABLE_B		},
1352  	{ "slab_unreclaimable",		NR_SLAB_UNRECLAIMABLE_B		},
1353  
1354  	/* The memory events */
1355  	{ "workingset_refault_anon",	WORKINGSET_REFAULT_ANON		},
1356  	{ "workingset_refault_file",	WORKINGSET_REFAULT_FILE		},
1357  	{ "workingset_activate_anon",	WORKINGSET_ACTIVATE_ANON	},
1358  	{ "workingset_activate_file",	WORKINGSET_ACTIVATE_FILE	},
1359  	{ "workingset_restore_anon",	WORKINGSET_RESTORE_ANON		},
1360  	{ "workingset_restore_file",	WORKINGSET_RESTORE_FILE		},
1361  	{ "workingset_nodereclaim",	WORKINGSET_NODERECLAIM		},
1362  
1363  	{ "pgdemote_kswapd",		PGDEMOTE_KSWAPD		},
1364  	{ "pgdemote_direct",		PGDEMOTE_DIRECT		},
1365  	{ "pgdemote_khugepaged",	PGDEMOTE_KHUGEPAGED	},
1366  #ifdef CONFIG_NUMA_BALANCING
1367  	{ "pgpromote_success",		PGPROMOTE_SUCCESS	},
1368  #endif
1369  };
1370  
1371  /* The actual unit of the state item, not the same as the output unit */
1372  static int memcg_page_state_unit(int item)
1373  {
1374  	switch (item) {
1375  	case MEMCG_PERCPU_B:
1376  	case MEMCG_ZSWAP_B:
1377  	case NR_SLAB_RECLAIMABLE_B:
1378  	case NR_SLAB_UNRECLAIMABLE_B:
1379  		return 1;
1380  	case NR_KERNEL_STACK_KB:
1381  		return SZ_1K;
1382  	default:
1383  		return PAGE_SIZE;
1384  	}
1385  }
1386  
1387  /* Translate stat items to the correct unit for memory.stat output */
1388  static int memcg_page_state_output_unit(int item)
1389  {
1390  	/*
1391  	 * Workingset state is actually in pages, but we export it to userspace
1392  	 * as a scalar count of events, so special case it here.
1393  	 *
1394  	 * Demotion and promotion activities are exported in pages, consistent
1395  	 * with their global counterparts.
1396  	 */
1397  	switch (item) {
1398  	case WORKINGSET_REFAULT_ANON:
1399  	case WORKINGSET_REFAULT_FILE:
1400  	case WORKINGSET_ACTIVATE_ANON:
1401  	case WORKINGSET_ACTIVATE_FILE:
1402  	case WORKINGSET_RESTORE_ANON:
1403  	case WORKINGSET_RESTORE_FILE:
1404  	case WORKINGSET_NODERECLAIM:
1405  	case PGDEMOTE_KSWAPD:
1406  	case PGDEMOTE_DIRECT:
1407  	case PGDEMOTE_KHUGEPAGED:
1408  #ifdef CONFIG_NUMA_BALANCING
1409  	case PGPROMOTE_SUCCESS:
1410  #endif
1411  		return 1;
1412  	default:
1413  		return memcg_page_state_unit(item);
1414  	}
1415  }
1416  
1417  unsigned long memcg_page_state_output(struct mem_cgroup *memcg, int item)
1418  {
1419  	return memcg_page_state(memcg, item) *
1420  		memcg_page_state_output_unit(item);
1421  }
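/*
 * Unit conversion examples: memory.stat reports byte values for state items,
 * so the output helpers scale each item by its output unit:
 *  - NR_KERNEL_STACK_KB is stored in KB and multiplied by SZ_1K,
 *  - the slab and zswap "_B" items are already in bytes (unit 1),
 *  - ordinary page counts are multiplied by PAGE_SIZE,
 *  - workingset and demotion items stay raw event counts, as noted above.
 */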
1422  
1423  unsigned long memcg_page_state_local_output(struct mem_cgroup *memcg, int item)
1424  {
1425  	return memcg_page_state_local(memcg, item) *
1426  		memcg_page_state_output_unit(item);
1427  }
1428  
1429  static void memcg_stat_format(struct mem_cgroup *memcg, struct seq_buf *s)
1430  {
1431  	int i;
1432  
1433  	/*
1434  	 * Provide statistics on the state of the memory subsystem as
1435  	 * well as cumulative event counters that show past behavior.
1436  	 *
1437  	 * This list is ordered following a combination of these gradients:
1438  	 * 1) generic big picture -> specifics and details
1439  	 * 2) reflecting userspace activity -> reflecting kernel heuristics
1440  	 *
1441  	 * Current memory state:
1442  	 */
1443  	mem_cgroup_flush_stats(memcg);
1444  
1445  	for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
1446  		u64 size;
1447  
1448  		size = memcg_page_state_output(memcg, memory_stats[i].idx);
1449  		seq_buf_printf(s, "%s %llu\n", memory_stats[i].name, size);
1450  
1451  		if (unlikely(memory_stats[i].idx == NR_SLAB_UNRECLAIMABLE_B)) {
1452  			size += memcg_page_state_output(memcg,
1453  							NR_SLAB_RECLAIMABLE_B);
1454  			seq_buf_printf(s, "slab %llu\n", size);
1455  		}
1456  	}
1457  
1458  	/* Accumulated memory events */
1459  	seq_buf_printf(s, "pgscan %lu\n",
1460  		       memcg_events(memcg, PGSCAN_KSWAPD) +
1461  		       memcg_events(memcg, PGSCAN_DIRECT) +
1462  		       memcg_events(memcg, PGSCAN_KHUGEPAGED));
1463  	seq_buf_printf(s, "pgsteal %lu\n",
1464  		       memcg_events(memcg, PGSTEAL_KSWAPD) +
1465  		       memcg_events(memcg, PGSTEAL_DIRECT) +
1466  		       memcg_events(memcg, PGSTEAL_KHUGEPAGED));
1467  
1468  	for (i = 0; i < ARRAY_SIZE(memcg_vm_event_stat); i++) {
1469  #ifdef CONFIG_MEMCG_V1
1470  		if (memcg_vm_event_stat[i] == PGPGIN ||
1471  		    memcg_vm_event_stat[i] == PGPGOUT)
1472  			continue;
1473  #endif
1474  		seq_buf_printf(s, "%s %lu\n",
1475  			       vm_event_name(memcg_vm_event_stat[i]),
1476  			       memcg_events(memcg, memcg_vm_event_stat[i]));
1477  	}
1478  }
1479  
1480  static void memory_stat_format(struct mem_cgroup *memcg, struct seq_buf *s)
1481  {
1482  	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
1483  		memcg_stat_format(memcg, s);
1484  	else
1485  		memcg1_stat_format(memcg, s);
1486  	if (seq_buf_has_overflowed(s))
1487  		pr_warn("%s: Warning, stat buffer overflow, please report\n", __func__);
1488  }
1489  
1490  /**
1491   * mem_cgroup_print_oom_context: Print OOM information relevant to
1492   * memory controller.
1493   * @memcg: The memory cgroup that went over limit
1494   * @p: Task that is going to be killed
1495   *
1496   * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
1497   * enabled
1498   */
1499  void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
1500  {
1501  	rcu_read_lock();
1502  
1503  	if (memcg) {
1504  		pr_cont(",oom_memcg=");
1505  		pr_cont_cgroup_path(memcg->css.cgroup);
1506  	} else
1507  		pr_cont(",global_oom");
1508  	if (p) {
1509  		pr_cont(",task_memcg=");
1510  		pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
1511  	}
1512  	rcu_read_unlock();
1513  }
1514  
1515  /**
1516   * mem_cgroup_print_oom_meminfo: Print OOM memory information relevant to
1517   * memory controller.
1518   * @memcg: The memory cgroup that went over limit
1519   */
1520  void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
1521  {
1522  	/* Use a static buffer, as the caller is holding oom_lock. */
1523  	static char buf[PAGE_SIZE];
1524  	struct seq_buf s;
1525  
1526  	lockdep_assert_held(&oom_lock);
1527  
1528  	pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
1529  		K((u64)page_counter_read(&memcg->memory)),
1530  		K((u64)READ_ONCE(memcg->memory.max)), memcg->memory.failcnt);
1531  	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
1532  		pr_info("swap: usage %llukB, limit %llukB, failcnt %lu\n",
1533  			K((u64)page_counter_read(&memcg->swap)),
1534  			K((u64)READ_ONCE(memcg->swap.max)), memcg->swap.failcnt);
1535  #ifdef CONFIG_MEMCG_V1
1536  	else {
1537  		pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
1538  			K((u64)page_counter_read(&memcg->memsw)),
1539  			K((u64)memcg->memsw.max), memcg->memsw.failcnt);
1540  		pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n",
1541  			K((u64)page_counter_read(&memcg->kmem)),
1542  			K((u64)memcg->kmem.max), memcg->kmem.failcnt);
1543  	}
1544  #endif
1545  
1546  	pr_info("Memory cgroup stats for ");
1547  	pr_cont_cgroup_path(memcg->css.cgroup);
1548  	pr_cont(":");
1549  	seq_buf_init(&s, buf, sizeof(buf));
1550  	memory_stat_format(memcg, &s);
1551  	seq_buf_do_printk(&s, KERN_INFO);
1552  }
1553  
1554  /*
1555   * Return the memory (and swap, if configured) limit for a memcg.
1556   */
1557  unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
1558  {
1559  	unsigned long max = READ_ONCE(memcg->memory.max);
1560  
1561  	if (do_memsw_account()) {
1562  		if (mem_cgroup_swappiness(memcg)) {
1563  			/* Calculate swap excess capacity from memsw limit */
1564  			unsigned long swap = READ_ONCE(memcg->memsw.max) - max;
1565  
1566  			max += min(swap, (unsigned long)total_swap_pages);
1567  		}
1568  	} else {
1569  		if (mem_cgroup_swappiness(memcg))
1570  			max += min(READ_ONCE(memcg->swap.max),
1571  				   (unsigned long)total_swap_pages);
1572  	}
1573  	return max;
1574  }
1575  
1576  unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
1577  {
1578  	return page_counter_read(&memcg->memory);
1579  }
1580  
1581  static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
1582  				     int order)
1583  {
1584  	struct oom_control oc = {
1585  		.zonelist = NULL,
1586  		.nodemask = NULL,
1587  		.memcg = memcg,
1588  		.gfp_mask = gfp_mask,
1589  		.order = order,
1590  	};
1591  	bool ret = true;
1592  
1593  	if (mutex_lock_killable(&oom_lock))
1594  		return true;
1595  
1596  	if (mem_cgroup_margin(memcg) >= (1 << order))
1597  		goto unlock;
1598  
1599  	/*
1600  	 * A few threads which were not waiting at mutex_lock_killable() can
1601  	 * fail to bail out. Therefore, check again after holding oom_lock.
1602  	 */
1603  	ret = task_is_dying() || out_of_memory(&oc);
1604  
1605  unlock:
1606  	mutex_unlock(&oom_lock);
1607  	return ret;
1608  }
1609  
1610  /*
1611   * Returns true if successfully killed one or more processes. Though in some
1612   * corner cases it can return true even without killing any process.
1613   */
1614  static bool mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
1615  {
1616  	bool locked, ret;
1617  
1618  	if (order > PAGE_ALLOC_COSTLY_ORDER)
1619  		return false;
1620  
1621  	memcg_memory_event(memcg, MEMCG_OOM);
1622  
1623  	if (!memcg1_oom_prepare(memcg, &locked))
1624  		return false;
1625  
1626  	ret = mem_cgroup_out_of_memory(memcg, mask, order);
1627  
1628  	memcg1_oom_finish(memcg, locked);
1629  
1630  	return ret;
1631  }
1632  
1633  /**
1634   * mem_cgroup_get_oom_group - get a memory cgroup to clean up after OOM
1635   * @victim: task to be killed by the OOM killer
1636   * @oom_domain: memcg in case of memcg OOM, NULL in case of system-wide OOM
1637   *
1638   * Returns a pointer to a memory cgroup, which has to be cleaned up
1639   * by killing all belonging OOM-killable tasks.
1640   *
1641   * Caller has to call mem_cgroup_put() on the returned non-NULL memcg.
1642   */
1643  struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
1644  					    struct mem_cgroup *oom_domain)
1645  {
1646  	struct mem_cgroup *oom_group = NULL;
1647  	struct mem_cgroup *memcg;
1648  
1649  	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
1650  		return NULL;
1651  
1652  	if (!oom_domain)
1653  		oom_domain = root_mem_cgroup;
1654  
1655  	rcu_read_lock();
1656  
1657  	memcg = mem_cgroup_from_task(victim);
1658  	if (mem_cgroup_is_root(memcg))
1659  		goto out;
1660  
1661  	/*
1662  	 * If the victim task has been asynchronously moved to a different
1663  	 * memory cgroup, we might end up killing tasks outside oom_domain.
1664  	 * In this case it's better to ignore memory.group.oom.
1665  	 */
1666  	if (unlikely(!mem_cgroup_is_descendant(memcg, oom_domain)))
1667  		goto out;
1668  
1669  	/*
1670  	 * Traverse the memory cgroup hierarchy from the victim task's
1671  	 * cgroup up to the OOMing cgroup (or root) to find the
1672  	 * highest-level memory cgroup with oom.group set.
1673  	 */
1674  	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
1675  		if (READ_ONCE(memcg->oom_group))
1676  			oom_group = memcg;
1677  
1678  		if (memcg == oom_domain)
1679  			break;
1680  	}
1681  
1682  	if (oom_group)
1683  		css_get(&oom_group->css);
1684  out:
1685  	rcu_read_unlock();
1686  
1687  	return oom_group;
1688  }
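/*
 * Worked example (added for illustration, not from the original source):
 * with a hierarchy A/B/C, the victim task in C and the OOM scoped to A,
 * the walk above goes C -> B -> A and remembers the highest level with
 * memory.oom.group set. If that is B, the OOM killer cleans up every
 * OOM-killable task in B (including C) rather than just the victim.
 */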
1689  
1690  void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
1691  {
1692  	pr_info("Tasks in ");
1693  	pr_cont_cgroup_path(memcg->css.cgroup);
1694  	pr_cont(" are going to be killed due to memory.oom.group set\n");
1695  }
1696  
1697  struct memcg_stock_pcp {
1698  	local_lock_t stock_lock;
1699  	struct mem_cgroup *cached; /* must never be root cgroup */
1700  	unsigned int nr_pages;
1701  
1702  	struct obj_cgroup *cached_objcg;
1703  	struct pglist_data *cached_pgdat;
1704  	unsigned int nr_bytes;
1705  	int nr_slab_reclaimable_b;
1706  	int nr_slab_unreclaimable_b;
1707  
1708  	struct work_struct work;
1709  	unsigned long flags;
1710  #define FLUSHING_CACHED_CHARGE	0
1711  };
1712  static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock) = {
1713  	.stock_lock = INIT_LOCAL_LOCK(stock_lock),
1714  };
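/*
 * Illustration (added, not part of the original source): the per-CPU stock
 * acts as a small cache in front of the shared page counters. A minimal
 * sketch of the fast/slow path split seen by a charger:
 *
 *	if (consume_stock(memcg, nr_pages))
 *		return 0;		hit: no page_counter atomics touched
 *	... otherwise charge a full batch against the page counters and park
 *	the surplus in this CPU's stock via refill_stock(), so the next few
 *	charges from this CPU are served locally.
 */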
1715  static DEFINE_MUTEX(percpu_charge_mutex);
1716  
1717  static struct obj_cgroup *drain_obj_stock(struct memcg_stock_pcp *stock);
1718  static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
1719  				     struct mem_cgroup *root_memcg);
1720  
1721  /**
1722   * consume_stock: Try to consume stocked charge on this cpu.
1723   * @memcg: memcg to consume from.
1724   * @nr_pages: how many pages to charge.
1725   *
1726   * The charges will only happen if @memcg matches the current cpu's memcg
1727   * stock, and at least @nr_pages are available in that stock.  If the stock
1728   * cannot service the request, the charge slow path will refill it.
1729   *
1730   * Returns true if successful, false otherwise.
1731   */
1732  static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
1733  {
1734  	struct memcg_stock_pcp *stock;
1735  	unsigned int stock_pages;
1736  	unsigned long flags;
1737  	bool ret = false;
1738  
1739  	if (nr_pages > MEMCG_CHARGE_BATCH)
1740  		return ret;
1741  
1742  	local_lock_irqsave(&memcg_stock.stock_lock, flags);
1743  
1744  	stock = this_cpu_ptr(&memcg_stock);
1745  	stock_pages = READ_ONCE(stock->nr_pages);
1746  	if (memcg == READ_ONCE(stock->cached) && stock_pages >= nr_pages) {
1747  		WRITE_ONCE(stock->nr_pages, stock_pages - nr_pages);
1748  		ret = true;
1749  	}
1750  
1751  	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
1752  
1753  	return ret;
1754  }
1755  
1756  /*
1757   * Return the cached stock pages to the page counters and reset the cached state.
1758   */
1759  static void drain_stock(struct memcg_stock_pcp *stock)
1760  {
1761  	unsigned int stock_pages = READ_ONCE(stock->nr_pages);
1762  	struct mem_cgroup *old = READ_ONCE(stock->cached);
1763  
1764  	if (!old)
1765  		return;
1766  
1767  	if (stock_pages) {
1768  		page_counter_uncharge(&old->memory, stock_pages);
1769  		if (do_memsw_account())
1770  			page_counter_uncharge(&old->memsw, stock_pages);
1771  
1772  		WRITE_ONCE(stock->nr_pages, 0);
1773  	}
1774  
1775  	css_put(&old->css);
1776  	WRITE_ONCE(stock->cached, NULL);
1777  }
1778  
1779  static void drain_local_stock(struct work_struct *dummy)
1780  {
1781  	struct memcg_stock_pcp *stock;
1782  	struct obj_cgroup *old = NULL;
1783  	unsigned long flags;
1784  
1785  	/*
1786  	 * The only protection from cpu hotplug (memcg_hotplug_cpu_dead) vs.
1787  	 * drain_stock races is that we always operate on local CPU stock
1788  	 * here with IRQ disabled
1789  	 */
1790  	local_lock_irqsave(&memcg_stock.stock_lock, flags);
1791  
1792  	stock = this_cpu_ptr(&memcg_stock);
1793  	old = drain_obj_stock(stock);
1794  	drain_stock(stock);
1795  	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
1796  
1797  	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
1798  	obj_cgroup_put(old);
1799  }
1800  
1801  /*
1802   * Cache charges (nr_pages) in the local per-CPU area.
1803   * They will be consumed by consume_stock() later.
1804   */
1805  static void __refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
1806  {
1807  	struct memcg_stock_pcp *stock;
1808  	unsigned int stock_pages;
1809  
1810  	stock = this_cpu_ptr(&memcg_stock);
1811  	if (READ_ONCE(stock->cached) != memcg) { /* reset if necessary */
1812  		drain_stock(stock);
1813  		css_get(&memcg->css);
1814  		WRITE_ONCE(stock->cached, memcg);
1815  	}
1816  	stock_pages = READ_ONCE(stock->nr_pages) + nr_pages;
1817  	WRITE_ONCE(stock->nr_pages, stock_pages);
1818  
1819  	if (stock_pages > MEMCG_CHARGE_BATCH)
1820  		drain_stock(stock);
1821  }
1822  
1823  static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
1824  {
1825  	unsigned long flags;
1826  
1827  	local_lock_irqsave(&memcg_stock.stock_lock, flags);
1828  	__refill_stock(memcg, nr_pages);
1829  	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
1830  }
1831  
1832  /*
1833   * Drain all per-CPU charge caches for the given root_memcg and the
1834   * subtree of the hierarchy under it.
1835   */
1836  void drain_all_stock(struct mem_cgroup *root_memcg)
1837  {
1838  	int cpu, curcpu;
1839  
1840  	/* If someone's already draining, avoid adding more workers. */
1841  	if (!mutex_trylock(&percpu_charge_mutex))
1842  		return;
1843  	/*
1844  	 * Notify other cpus that system-wide "drain" is running
1845  	 * We do not care about races with the cpu hotplug because cpu down
1846  	 * as well as workers from this path always operate on the local
1847  	 * per-cpu data. CPU up doesn't touch memcg_stock at all.
1848  	 */
1849  	migrate_disable();
1850  	curcpu = smp_processor_id();
1851  	for_each_online_cpu(cpu) {
1852  		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
1853  		struct mem_cgroup *memcg;
1854  		bool flush = false;
1855  
1856  		rcu_read_lock();
1857  		memcg = READ_ONCE(stock->cached);
1858  		if (memcg && READ_ONCE(stock->nr_pages) &&
1859  		    mem_cgroup_is_descendant(memcg, root_memcg))
1860  			flush = true;
1861  		else if (obj_stock_flush_required(stock, root_memcg))
1862  			flush = true;
1863  		rcu_read_unlock();
1864  
1865  		if (flush &&
1866  		    !test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
1867  			if (cpu == curcpu)
1868  				drain_local_stock(&stock->work);
1869  			else if (!cpu_is_isolated(cpu))
1870  				schedule_work_on(cpu, &stock->work);
1871  		}
1872  	}
1873  	migrate_enable();
1874  	mutex_unlock(&percpu_charge_mutex);
1875  }
1876  
1877  static int memcg_hotplug_cpu_dead(unsigned int cpu)
1878  {
1879  	struct memcg_stock_pcp *stock;
1880  
1881  	stock = &per_cpu(memcg_stock, cpu);
1882  	drain_stock(stock);
1883  
1884  	return 0;
1885  }
1886  
1887  static unsigned long reclaim_high(struct mem_cgroup *memcg,
1888  				  unsigned int nr_pages,
1889  				  gfp_t gfp_mask)
1890  {
1891  	unsigned long nr_reclaimed = 0;
1892  
1893  	do {
1894  		unsigned long pflags;
1895  
1896  		if (page_counter_read(&memcg->memory) <=
1897  		    READ_ONCE(memcg->memory.high))
1898  			continue;
1899  
1900  		memcg_memory_event(memcg, MEMCG_HIGH);
1901  
1902  		psi_memstall_enter(&pflags);
1903  		nr_reclaimed += try_to_free_mem_cgroup_pages(memcg, nr_pages,
1904  							gfp_mask,
1905  							MEMCG_RECLAIM_MAY_SWAP,
1906  							NULL);
1907  		psi_memstall_leave(&pflags);
1908  	} while ((memcg = parent_mem_cgroup(memcg)) &&
1909  		 !mem_cgroup_is_root(memcg));
1910  
1911  	return nr_reclaimed;
1912  }
1913  
1914  static void high_work_func(struct work_struct *work)
1915  {
1916  	struct mem_cgroup *memcg;
1917  
1918  	memcg = container_of(work, struct mem_cgroup, high_work);
1919  	reclaim_high(memcg, MEMCG_CHARGE_BATCH, GFP_KERNEL);
1920  }
1921  
1922  /*
1923   * Clamp the maximum sleep time per allocation batch to 2 seconds. This is
1924   * enough to cause a significant slowdown in most cases, while still
1925   * allowing diagnostics and tracing to proceed without becoming stuck.
1926   */
1927  #define MEMCG_MAX_HIGH_DELAY_JIFFIES (2UL*HZ)
1928  
1929  /*
1930   * When calculating the delay, we use these shifts on either side of the
1931   * exponentiation to maintain precision and scale the result to a reasonable
1932   * number of jiffies (see the table below).
1933   *
1934   * - MEMCG_DELAY_PRECISION_SHIFT: Extra precision bits while translating the
1935   *   overage ratio to a delay.
1936   * - MEMCG_DELAY_SCALING_SHIFT: The number of bits to scale down the
1937   *   proposed penalty in order to reduce to a reasonable number of jiffies, and
1938   *   to produce a reasonable delay curve.
1939   *
1940   * MEMCG_DELAY_SCALING_SHIFT just happens to be a number that produces a
1941   * reasonable delay curve compared to precision-adjusted overage, not
1942   * penalising heavily at first, but still making sure that growth beyond the
1943   * limit penalises misbehaving cgroups by slowing them down exponentially. For
1944   * example, with a high of 100 megabytes:
1945   *
1946   *  +-------+------------------------+
1947   *  | usage | time to allocate in ms |
1948   *  +-------+------------------------+
1949   *  | 100M  |                      0 |
1950   *  | 101M  |                      6 |
1951   *  | 102M  |                     25 |
1952   *  | 103M  |                     57 |
1953   *  | 104M  |                    102 |
1954   *  | 105M  |                    159 |
1955   *  | 106M  |                    230 |
1956   *  | 107M  |                    313 |
1957   *  | 108M  |                    409 |
1958   *  | 109M  |                    518 |
1959   *  | 110M  |                    639 |
1960   *  | 111M  |                    774 |
1961   *  | 112M  |                    921 |
1962   *  | 113M  |                   1081 |
1963   *  | 114M  |                   1254 |
1964   *  | 115M  |                   1439 |
1965   *  | 116M  |                   1638 |
1966   *  | 117M  |                   1849 |
1967   *  | 118M  |                   2000 |
1968   *  | 119M  |                   2000 |
1969   *  | 120M  |                   2000 |
1970   *  +-------+------------------------+
1971   */
1972  #define MEMCG_DELAY_PRECISION_SHIFT 20
1973  #define MEMCG_DELAY_SCALING_SHIFT 14
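/*
 * Worked example (added for illustration, assuming HZ == 1000 and a charge
 * of MEMCG_CHARGE_BATCH pages, i.e. the same assumptions as the table
 * above): with memory.high = 100M and usage = 110M,
 *
 *	overage         = ((110M - 100M) << MEMCG_DELAY_PRECISION_SHIFT) / 100M
 *	                ~= 104857
 *	penalty_jiffies = overage * overage * HZ
 *	                  >> MEMCG_DELAY_PRECISION_SHIFT
 *	                  >> MEMCG_DELAY_SCALING_SHIFT
 *	                ~= 640 jiffies, i.e. roughly the 639ms row of the table.
 */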
1974  
1975  static u64 calculate_overage(unsigned long usage, unsigned long high)
1976  {
1977  	u64 overage;
1978  
1979  	if (usage <= high)
1980  		return 0;
1981  
1982  	/*
1983  	 * Prevent division by 0 in overage calculation by acting as if
1984  	 * it was a threshold of 1 page
1985  	 */
1986  	high = max(high, 1UL);
1987  
1988  	overage = usage - high;
1989  	overage <<= MEMCG_DELAY_PRECISION_SHIFT;
1990  	return div64_u64(overage, high);
1991  }
1992  
1993  static u64 mem_find_max_overage(struct mem_cgroup *memcg)
1994  {
1995  	u64 overage, max_overage = 0;
1996  
1997  	do {
1998  		overage = calculate_overage(page_counter_read(&memcg->memory),
1999  					    READ_ONCE(memcg->memory.high));
2000  		max_overage = max(overage, max_overage);
2001  	} while ((memcg = parent_mem_cgroup(memcg)) &&
2002  		 !mem_cgroup_is_root(memcg));
2003  
2004  	return max_overage;
2005  }
2006  
2007  static u64 swap_find_max_overage(struct mem_cgroup *memcg)
2008  {
2009  	u64 overage, max_overage = 0;
2010  
2011  	do {
2012  		overage = calculate_overage(page_counter_read(&memcg->swap),
2013  					    READ_ONCE(memcg->swap.high));
2014  		if (overage)
2015  			memcg_memory_event(memcg, MEMCG_SWAP_HIGH);
2016  		max_overage = max(overage, max_overage);
2017  	} while ((memcg = parent_mem_cgroup(memcg)) &&
2018  		 !mem_cgroup_is_root(memcg));
2019  
2020  	return max_overage;
2021  }
2022  
2023  /*
2024   * Get the number of jiffies that we should penalise a mischievous cgroup which
2025   * is exceeding its memory.high by checking both it and its ancestors.
2026   */
2027  static unsigned long calculate_high_delay(struct mem_cgroup *memcg,
2028  					  unsigned int nr_pages,
2029  					  u64 max_overage)
2030  {
2031  	unsigned long penalty_jiffies;
2032  
2033  	if (!max_overage)
2034  		return 0;
2035  
2036  	/*
2037  	 * We use overage compared to memory.high to calculate the number of
2038  	 * jiffies to sleep (penalty_jiffies). Ideally this value should be
2039  	 * fairly lenient on small overages, and increasingly harsh when the
2040  	 * memcg in question makes it clear that it has no intention of stopping
2041  	 * its crazy behaviour, so we exponentially increase the delay based on
2042  	 * overage amount.
2043  	 */
2044  	penalty_jiffies = max_overage * max_overage * HZ;
2045  	penalty_jiffies >>= MEMCG_DELAY_PRECISION_SHIFT;
2046  	penalty_jiffies >>= MEMCG_DELAY_SCALING_SHIFT;
2047  
2048  	/*
2049  	 * Factor in the task's own contribution to the overage, such that four
2050  	 * N-sized allocations are throttled approximately the same as one
2051  	 * 4N-sized allocation.
2052  	 *
2053  	 * MEMCG_CHARGE_BATCH pages is nominal, so work out how much smaller or
2054   * larger the current charge batch is than that.
2055  	 */
2056  	return penalty_jiffies * nr_pages / MEMCG_CHARGE_BATCH;
2057  }
2058  
2059  /*
2060   * Reclaims memory over the high limit. Called directly from
2061   * try_charge() (context permitting), as well as from the userland
2062   * return path where reclaim is always able to block.
2063   */
2064  void mem_cgroup_handle_over_high(gfp_t gfp_mask)
2065  {
2066  	unsigned long penalty_jiffies;
2067  	unsigned long pflags;
2068  	unsigned long nr_reclaimed;
2069  	unsigned int nr_pages = current->memcg_nr_pages_over_high;
2070  	int nr_retries = MAX_RECLAIM_RETRIES;
2071  	struct mem_cgroup *memcg;
2072  	bool in_retry = false;
2073  
2074  	if (likely(!nr_pages))
2075  		return;
2076  
2077  	memcg = get_mem_cgroup_from_mm(current->mm);
2078  	current->memcg_nr_pages_over_high = 0;
2079  
2080  retry_reclaim:
2081  	/*
2082  	 * Bail if the task is already exiting. Unlike memory.max,
2083  	 * memory.high enforcement isn't as strict, and there is no
2084  	 * OOM killer involved, which means the excess could already
2085  	 * be much bigger (and still growing) than it could for
2086  	 * memory.max; the dying task could get stuck in fruitless
2087  	 * reclaim for a long time, which isn't desirable.
2088  	 */
2089  	if (task_is_dying())
2090  		goto out;
2091  
2092  	/*
2093  	 * The allocating task should reclaim at least the batch size, but for
2094  	 * subsequent retries we only want to do what's necessary to prevent oom
2095  	 * or breaching resource isolation.
2096  	 *
2097  	 * This is distinct from memory.max or page allocator behaviour because
2098  	 * memory.high is currently batched, whereas memory.max and the page
2099  	 * allocator run every time an allocation is made.
2100  	 */
2101  	nr_reclaimed = reclaim_high(memcg,
2102  				    in_retry ? SWAP_CLUSTER_MAX : nr_pages,
2103  				    gfp_mask);
2104  
2105  	/*
2106  	 * memory.high is breached and reclaim is unable to keep up. Throttle
2107  	 * allocators proactively to slow down excessive growth.
2108  	 */
2109  	penalty_jiffies = calculate_high_delay(memcg, nr_pages,
2110  					       mem_find_max_overage(memcg));
2111  
2112  	penalty_jiffies += calculate_high_delay(memcg, nr_pages,
2113  						swap_find_max_overage(memcg));
2114  
2115  	/*
2116  	 * Clamp the max delay per usermode return so as to still keep the
2117  	 * application moving forwards and also permit diagnostics, albeit
2118  	 * extremely slowly.
2119  	 */
2120  	penalty_jiffies = min(penalty_jiffies, MEMCG_MAX_HIGH_DELAY_JIFFIES);
2121  
2122  	/*
2123  	 * Don't sleep if the amount of jiffies this memcg owes us is so low
2124  	 * that it's not even worth doing, in an attempt to be nice to those who
2125  	 * go only a small amount over their memory.high value and maybe haven't
2126  	 * been aggressively reclaimed enough yet.
2127  	 */
2128  	if (penalty_jiffies <= HZ / 100)
2129  		goto out;
2130  
2131  	/*
2132  	 * If reclaim is making forward progress but we're still over
2133  	 * memory.high, we want to encourage that rather than doing allocator
2134  	 * throttling.
2135  	 */
2136  	if (nr_reclaimed || nr_retries--) {
2137  		in_retry = true;
2138  		goto retry_reclaim;
2139  	}
2140  
2141  	/*
2142  	 * Reclaim didn't manage to push usage below the limit, slow
2143  	 * this allocating task down.
2144  	 *
2145  	 * If we exit early, we're guaranteed to die (since
2146  	 * schedule_timeout_killable sets TASK_KILLABLE). This means we don't
2147  	 * need to account for any ill-begotten jiffies to pay them off later.
2148  	 */
2149  	psi_memstall_enter(&pflags);
2150  	schedule_timeout_killable(penalty_jiffies);
2151  	psi_memstall_leave(&pflags);
2152  
2153  out:
2154  	css_put(&memcg->css);
2155  }
2156  
2157  int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask,
2158  		     unsigned int nr_pages)
2159  {
2160  	unsigned int batch = max(MEMCG_CHARGE_BATCH, nr_pages);
2161  	int nr_retries = MAX_RECLAIM_RETRIES;
2162  	struct mem_cgroup *mem_over_limit;
2163  	struct page_counter *counter;
2164  	unsigned long nr_reclaimed;
2165  	bool passed_oom = false;
2166  	unsigned int reclaim_options = MEMCG_RECLAIM_MAY_SWAP;
2167  	bool drained = false;
2168  	bool raised_max_event = false;
2169  	unsigned long pflags;
2170  
2171  retry:
2172  	if (consume_stock(memcg, nr_pages))
2173  		return 0;
2174  
2175  	if (!do_memsw_account() ||
2176  	    page_counter_try_charge(&memcg->memsw, batch, &counter)) {
2177  		if (page_counter_try_charge(&memcg->memory, batch, &counter))
2178  			goto done_restock;
2179  		if (do_memsw_account())
2180  			page_counter_uncharge(&memcg->memsw, batch);
2181  		mem_over_limit = mem_cgroup_from_counter(counter, memory);
2182  	} else {
2183  		mem_over_limit = mem_cgroup_from_counter(counter, memsw);
2184  		reclaim_options &= ~MEMCG_RECLAIM_MAY_SWAP;
2185  	}
2186  
2187  	if (batch > nr_pages) {
2188  		batch = nr_pages;
2189  		goto retry;
2190  	}
2191  
2192  	/*
2193  	 * Prevent unbounded recursion when reclaim operations need to
2194  	 * allocate memory. This might exceed the limits temporarily,
2195  	 * but we prefer facilitating memory reclaim and getting back
2196  	 * under the limit over triggering OOM kills in these cases.
2197  	 */
2198  	if (unlikely(current->flags & PF_MEMALLOC))
2199  		goto force;
2200  
2201  	if (unlikely(task_in_memcg_oom(current)))
2202  		goto nomem;
2203  
2204  	if (!gfpflags_allow_blocking(gfp_mask))
2205  		goto nomem;
2206  
2207  	memcg_memory_event(mem_over_limit, MEMCG_MAX);
2208  	raised_max_event = true;
2209  
2210  	psi_memstall_enter(&pflags);
2211  	nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages,
2212  						    gfp_mask, reclaim_options, NULL);
2213  	psi_memstall_leave(&pflags);
2214  
2215  	if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
2216  		goto retry;
2217  
2218  	if (!drained) {
2219  		drain_all_stock(mem_over_limit);
2220  		drained = true;
2221  		goto retry;
2222  	}
2223  
2224  	if (gfp_mask & __GFP_NORETRY)
2225  		goto nomem;
2226  	/*
2227  	 * Even though the limit is exceeded at this point, reclaim
2228  	 * may have been able to free some pages.  Retry the charge
2229  	 * before killing the task.
2230  	 *
2231  	 * Only for regular pages, though: huge pages are rather
2232  	 * unlikely to succeed so close to the limit, and we fall back
2233  	 * to regular pages anyway in case of failure.
2234  	 */
2235  	if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER))
2236  		goto retry;
2237  	/*
2238  	 * During a task move, charges can be double-counted. So it's better
2239  	 * to wait until the end of the move if one is in progress.
2240  	 */
2241  	if (memcg1_wait_acct_move(mem_over_limit))
2242  		goto retry;
2243  
2244  	if (nr_retries--)
2245  		goto retry;
2246  
2247  	if (gfp_mask & __GFP_RETRY_MAYFAIL)
2248  		goto nomem;
2249  
2250  	/* Avoid endless loop for tasks bypassed by the oom killer */
2251  	if (passed_oom && task_is_dying())
2252  		goto nomem;
2253  
2254  	/*
2255  	 * Keep retrying as long as the memcg oom killer is able to make
2256  	 * forward progress, or bypass the charge if the oom killer
2257  	 * couldn't make any progress.
2258  	 */
2259  	if (mem_cgroup_oom(mem_over_limit, gfp_mask,
2260  			   get_order(nr_pages * PAGE_SIZE))) {
2261  		passed_oom = true;
2262  		nr_retries = MAX_RECLAIM_RETRIES;
2263  		goto retry;
2264  	}
2265  nomem:
2266  	/*
2267  	 * Memcg doesn't have a dedicated reserve for atomic
2268  	 * allocations. But like the global atomic pool, we need to
2269  	 * put the burden of reclaim on regular allocation requests
2270  	 * and let these go through as privileged allocations.
2271  	 */
2272  	if (!(gfp_mask & (__GFP_NOFAIL | __GFP_HIGH)))
2273  		return -ENOMEM;
2274  force:
2275  	/*
2276  	 * If the allocation has to be enforced, don't forget to raise
2277  	 * a MEMCG_MAX event.
2278  	 */
2279  	if (!raised_max_event)
2280  		memcg_memory_event(mem_over_limit, MEMCG_MAX);
2281  
2282  	/*
2283  	 * The allocation either can't fail or will lead to more memory
2284  	 * being freed very soon.  Allow memory usage to go over the limit
2285  	 * temporarily by force charging it.
2286  	 */
2287  	page_counter_charge(&memcg->memory, nr_pages);
2288  	if (do_memsw_account())
2289  		page_counter_charge(&memcg->memsw, nr_pages);
2290  
2291  	return 0;
2292  
2293  done_restock:
2294  	if (batch > nr_pages)
2295  		refill_stock(memcg, batch - nr_pages);
2296  
2297  	/*
2298  	 * If the hierarchy is above the normal consumption range, schedule
2299  	 * reclaim on returning to userland.  We can perform reclaim here
2300  	 * if __GFP_RECLAIM is set, but let's always punt for simplicity and so that
2301  	 * GFP_KERNEL can consistently be used during reclaim.  @memcg is
2302  	 * not recorded as it most likely matches current's and won't
2303  	 * change in the meantime.  As high limit is checked again before
2304  	 * reclaim, the cost of mismatch is negligible.
2305  	 */
2306  	do {
2307  		bool mem_high, swap_high;
2308  
2309  		mem_high = page_counter_read(&memcg->memory) >
2310  			READ_ONCE(memcg->memory.high);
2311  		swap_high = page_counter_read(&memcg->swap) >
2312  			READ_ONCE(memcg->swap.high);
2313  
2314  		/* Don't bother a random interrupted task */
2315  		if (!in_task()) {
2316  			if (mem_high) {
2317  				schedule_work(&memcg->high_work);
2318  				break;
2319  			}
2320  			continue;
2321  		}
2322  
2323  		if (mem_high || swap_high) {
2324  			/*
2325  			 * The allocating tasks in this cgroup will need to do
2326  			 * reclaim or be throttled to prevent further growth
2327  			 * of the memory or swap footprints.
2328  			 *
2329  			 * Target some best-effort fairness between the tasks,
2330  			 * and distribute reclaim work and delay penalties
2331  			 * based on how much each task is actually allocating.
2332  			 */
2333  			current->memcg_nr_pages_over_high += batch;
2334  			set_notify_resume(current);
2335  			break;
2336  		}
2337  	} while ((memcg = parent_mem_cgroup(memcg)));
2338  
2339  	/*
2340  	 * Reclaim is set up above to be called from the userland
2341  	 * return path. But also attempt synchronous reclaim to avoid
2342  	 * excessive overrun while the task is still inside the
2343  	 * kernel. If this is successful, the return path will see it
2344  	 * when it rechecks the overage and simply bail out.
2345  	 */
2346  	if (current->memcg_nr_pages_over_high > MEMCG_CHARGE_BATCH &&
2347  	    !(current->flags & PF_MEMALLOC) &&
2348  	    gfpflags_allow_blocking(gfp_mask))
2349  		mem_cgroup_handle_over_high(gfp_mask);
2350  	return 0;
2351  }
2352  
2353  /**
2354   * mem_cgroup_cancel_charge() - cancel an uncommitted try_charge() call.
2355   * @memcg: memcg previously charged.
2356   * @nr_pages: number of pages previously charged.
2357   */
2358  void mem_cgroup_cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
2359  {
2360  	if (mem_cgroup_is_root(memcg))
2361  		return;
2362  
2363  	page_counter_uncharge(&memcg->memory, nr_pages);
2364  	if (do_memsw_account())
2365  		page_counter_uncharge(&memcg->memsw, nr_pages);
2366  }
2367  
2368  static void commit_charge(struct folio *folio, struct mem_cgroup *memcg)
2369  {
2370  	VM_BUG_ON_FOLIO(folio_memcg_charged(folio), folio);
2371  	/*
2372  	 * Any of the following ensures page's memcg stability:
2373  	 *
2374  	 * - the page lock
2375  	 * - LRU isolation
2376  	 * - folio_memcg_lock()
2377  	 * - exclusive reference
2378  	 * - mem_cgroup_trylock_pages()
2379  	 */
2380  	folio->memcg_data = (unsigned long)memcg;
2381  }
2382  
2383  /**
2384   * mem_cgroup_commit_charge - commit a previously successful try_charge().
2385   * @folio: folio to commit the charge to.
2386   * @memcg: memcg previously charged.
2387   */
2388  void mem_cgroup_commit_charge(struct folio *folio, struct mem_cgroup *memcg)
2389  {
2390  	css_get(&memcg->css);
2391  	commit_charge(folio, memcg);
2392  	memcg1_commit_charge(folio, memcg);
2393  }
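/*
 * Usage sketch (added for illustration; the caller and install_folio() are
 * hypothetical, not taken from this file): charging is split into a
 * try/commit or try/cancel pair so a folio can be charged before it is
 * made publicly visible:
 *
 *	if (!try_charge_memcg(memcg, gfp, folio_nr_pages(folio))) {
 *		if (install_folio(folio))	hypothetical failure path
 *			mem_cgroup_cancel_charge(memcg, folio_nr_pages(folio));
 *		else
 *			mem_cgroup_commit_charge(folio, memcg);
 *	}
 */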
2394  
2395  static inline void __mod_objcg_mlstate(struct obj_cgroup *objcg,
2396  				       struct pglist_data *pgdat,
2397  				       enum node_stat_item idx, int nr)
2398  {
2399  	struct mem_cgroup *memcg;
2400  	struct lruvec *lruvec;
2401  
2402  	rcu_read_lock();
2403  	memcg = obj_cgroup_memcg(objcg);
2404  	lruvec = mem_cgroup_lruvec(memcg, pgdat);
2405  	__mod_memcg_lruvec_state(lruvec, idx, nr);
2406  	rcu_read_unlock();
2407  }
2408  
2409  static __always_inline
2410  struct mem_cgroup *mem_cgroup_from_obj_folio(struct folio *folio, void *p)
2411  {
2412  	/*
2413  	 * Slab objects are accounted individually, not per-page.
2414  	 * Memcg membership data for each individual object is saved in
2415  	 * slab->obj_exts.
2416  	 */
2417  	if (folio_test_slab(folio)) {
2418  		struct slabobj_ext *obj_exts;
2419  		struct slab *slab;
2420  		unsigned int off;
2421  
2422  		slab = folio_slab(folio);
2423  		obj_exts = slab_obj_exts(slab);
2424  		if (!obj_exts)
2425  			return NULL;
2426  
2427  		off = obj_to_index(slab->slab_cache, slab, p);
2428  		if (obj_exts[off].objcg)
2429  			return obj_cgroup_memcg(obj_exts[off].objcg);
2430  
2431  		return NULL;
2432  	}
2433  
2434  	/*
2435  	 * folio_memcg_check() is used here, because in theory we can encounter
2436  	 * a folio where the slab flag has been cleared already, but
2437  	 * slab->obj_exts has not been freed yet.
2438  	 * folio_memcg_check() will guarantee that a proper memory
2439  	 * cgroup pointer or NULL will be returned.
2440  	 */
2441  	return folio_memcg_check(folio);
2442  }
2443  
2444  /*
2445   * Returns a pointer to the memory cgroup to which the kernel object is charged.
2446   * It is not suitable for objects allocated using vmalloc().
2447   *
2448   * A passed kernel object must be a slab object or a generic kernel page.
2449   *
2450   * The caller must ensure the memcg lifetime, e.g. by taking rcu_read_lock(),
2451   * cgroup_mutex, etc.
2452   */
2453  struct mem_cgroup *mem_cgroup_from_slab_obj(void *p)
2454  {
2455  	if (mem_cgroup_disabled())
2456  		return NULL;
2457  
2458  	return mem_cgroup_from_obj_folio(virt_to_folio(p), p);
2459  }
2460  
2461  static struct obj_cgroup *__get_obj_cgroup_from_memcg(struct mem_cgroup *memcg)
2462  {
2463  	struct obj_cgroup *objcg = NULL;
2464  
2465  	for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) {
2466  		objcg = rcu_dereference(memcg->objcg);
2467  		if (likely(objcg && obj_cgroup_tryget(objcg)))
2468  			break;
2469  		objcg = NULL;
2470  	}
2471  	return objcg;
2472  }
2473  
2474  static struct obj_cgroup *current_objcg_update(void)
2475  {
2476  	struct mem_cgroup *memcg;
2477  	struct obj_cgroup *old, *objcg = NULL;
2478  
2479  	do {
2480  		/* Atomically drop the update bit. */
2481  		old = xchg(&current->objcg, NULL);
2482  		if (old) {
2483  			old = (struct obj_cgroup *)
2484  				((unsigned long)old & ~CURRENT_OBJCG_UPDATE_FLAG);
2485  			obj_cgroup_put(old);
2486  
2487  			old = NULL;
2488  		}
2489  
2490  		/* If new objcg is NULL, no reason for the second atomic update. */
2491  		if (!current->mm || (current->flags & PF_KTHREAD))
2492  			return NULL;
2493  
2494  		/*
2495  		 * Release the objcg pointer from the previous iteration,
2496  		 * if try_cmpxchg() below fails.
2497  		 */
2498  		if (unlikely(objcg)) {
2499  			obj_cgroup_put(objcg);
2500  			objcg = NULL;
2501  		}
2502  
2503  		/*
2504  		 * Obtain the new objcg pointer. The current task can be
2505  		 * asynchronously moved to another memcg and the previous
2506  		 * memcg can be offlined. So let's get the memcg pointer
2507  		 * and try get a reference to objcg under a rcu read lock.
2508  		 */
2509  
2510  		rcu_read_lock();
2511  		memcg = mem_cgroup_from_task(current);
2512  		objcg = __get_obj_cgroup_from_memcg(memcg);
2513  		rcu_read_unlock();
2514  
2515  		/*
2516  		 * Try to set up a new objcg pointer atomically. If it
2517  		 * fails, it means the update flag was set concurrently, so
2518  		 * the whole procedure should be repeated.
2519  		 */
2520  	} while (!try_cmpxchg(&current->objcg, &old, objcg));
2521  
2522  	return objcg;
2523  }
2524  
2525  __always_inline struct obj_cgroup *current_obj_cgroup(void)
2526  {
2527  	struct mem_cgroup *memcg;
2528  	struct obj_cgroup *objcg;
2529  
2530  	if (in_task()) {
2531  		memcg = current->active_memcg;
2532  		if (unlikely(memcg))
2533  			goto from_memcg;
2534  
2535  		objcg = READ_ONCE(current->objcg);
2536  		if (unlikely((unsigned long)objcg & CURRENT_OBJCG_UPDATE_FLAG))
2537  			objcg = current_objcg_update();
2538  		/*
2539  		 * The objcg reference is kept by the task, so it's safe
2540  		 * for the current task to use it.
2541  		 */
2542  		return objcg;
2543  	}
2544  
2545  	memcg = this_cpu_read(int_active_memcg);
2546  	if (unlikely(memcg))
2547  		goto from_memcg;
2548  
2549  	return NULL;
2550  
2551  from_memcg:
2552  	objcg = NULL;
2553  	for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) {
2554  		/*
2555  		 * Memcg pointer is protected by scope (see set_active_memcg())
2556  		 * and is pinning the corresponding objcg, so objcg can't go
2557  		 * away and can be used within the scope without any additional
2558  		 * protection.
2559  		 */
2560  		objcg = rcu_dereference_check(memcg->objcg, 1);
2561  		if (likely(objcg))
2562  			break;
2563  	}
2564  
2565  	return objcg;
2566  }
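/*
 * Scope sketch (added for illustration): a caller that wants its kernel
 * allocations charged to a particular memcg brackets them with
 * set_active_memcg() from <linux/sched/mm.h>; within that scope
 * current_obj_cgroup() resolves to the objcg of that memcg:
 *
 *	old = set_active_memcg(memcg);
 *	ptr = kmalloc(size, GFP_KERNEL_ACCOUNT);
 *	set_active_memcg(old);
 */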
2567  
2568  struct obj_cgroup *get_obj_cgroup_from_folio(struct folio *folio)
2569  {
2570  	struct obj_cgroup *objcg;
2571  
2572  	if (!memcg_kmem_online())
2573  		return NULL;
2574  
2575  	if (folio_memcg_kmem(folio)) {
2576  		objcg = __folio_objcg(folio);
2577  		obj_cgroup_get(objcg);
2578  	} else {
2579  		struct mem_cgroup *memcg;
2580  
2581  		rcu_read_lock();
2582  		memcg = __folio_memcg(folio);
2583  		if (memcg)
2584  			objcg = __get_obj_cgroup_from_memcg(memcg);
2585  		else
2586  			objcg = NULL;
2587  		rcu_read_unlock();
2588  	}
2589  	return objcg;
2590  }
2591  
2592  /*
2593   * obj_cgroup_uncharge_pages: uncharge a number of kernel pages from an objcg
2594   * @objcg: object cgroup to uncharge
2595   * @nr_pages: number of pages to uncharge
2596   */
2597  static void obj_cgroup_uncharge_pages(struct obj_cgroup *objcg,
2598  				      unsigned int nr_pages)
2599  {
2600  	struct mem_cgroup *memcg;
2601  
2602  	memcg = get_mem_cgroup_from_objcg(objcg);
2603  
2604  	mod_memcg_state(memcg, MEMCG_KMEM, -nr_pages);
2605  	memcg1_account_kmem(memcg, -nr_pages);
2606  	refill_stock(memcg, nr_pages);
2607  
2608  	css_put(&memcg->css);
2609  }
2610  
2611  /*
2612   * obj_cgroup_charge_pages: charge a number of kernel pages to an objcg
2613   * @objcg: object cgroup to charge
2614   * @gfp: reclaim mode
2615   * @nr_pages: number of pages to charge
2616   *
2617   * Returns 0 on success, an error code on failure.
2618   */
2619  static int obj_cgroup_charge_pages(struct obj_cgroup *objcg, gfp_t gfp,
2620  				   unsigned int nr_pages)
2621  {
2622  	struct mem_cgroup *memcg;
2623  	int ret;
2624  
2625  	memcg = get_mem_cgroup_from_objcg(objcg);
2626  
2627  	ret = try_charge_memcg(memcg, gfp, nr_pages);
2628  	if (ret)
2629  		goto out;
2630  
2631  	mod_memcg_state(memcg, MEMCG_KMEM, nr_pages);
2632  	memcg1_account_kmem(memcg, nr_pages);
2633  out:
2634  	css_put(&memcg->css);
2635  
2636  	return ret;
2637  }
2638  
2639  /**
2640   * __memcg_kmem_charge_page: charge a kmem page to the current memory cgroup
2641   * @page: page to charge
2642   * @gfp: reclaim mode
2643   * @order: allocation order
2644   *
2645   * Returns 0 on success, an error code on failure.
2646   */
2647  int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order)
2648  {
2649  	struct obj_cgroup *objcg;
2650  	int ret = 0;
2651  
2652  	objcg = current_obj_cgroup();
2653  	if (objcg) {
2654  		ret = obj_cgroup_charge_pages(objcg, gfp, 1 << order);
2655  		if (!ret) {
2656  			obj_cgroup_get(objcg);
2657  			page->memcg_data = (unsigned long)objcg |
2658  				MEMCG_DATA_KMEM;
2659  			return 0;
2660  		}
2661  	}
2662  	return ret;
2663  }
2664  
2665  /**
2666   * __memcg_kmem_uncharge_page: uncharge a kmem page
2667   * @page: page to uncharge
2668   * @order: allocation order
2669   */
2670  void __memcg_kmem_uncharge_page(struct page *page, int order)
2671  {
2672  	struct folio *folio = page_folio(page);
2673  	struct obj_cgroup *objcg;
2674  	unsigned int nr_pages = 1 << order;
2675  
2676  	if (!folio_memcg_kmem(folio))
2677  		return;
2678  
2679  	objcg = __folio_objcg(folio);
2680  	obj_cgroup_uncharge_pages(objcg, nr_pages);
2681  	folio->memcg_data = 0;
2682  	obj_cgroup_put(objcg);
2683  }
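/*
 * Pairing sketch (added for illustration): the page allocator invokes the
 * charge side for __GFP_ACCOUNT allocations and the uncharge side when the
 * page is freed, so the two calls always come in matched pairs:
 *
 *	__memcg_kmem_charge_page(page, gfp, order);	at allocation
 *	...
 *	__memcg_kmem_uncharge_page(page, order);	at free
 */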
2684  
2685  static void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
2686  		     enum node_stat_item idx, int nr)
2687  {
2688  	struct memcg_stock_pcp *stock;
2689  	struct obj_cgroup *old = NULL;
2690  	unsigned long flags;
2691  	int *bytes;
2692  
2693  	local_lock_irqsave(&memcg_stock.stock_lock, flags);
2694  	stock = this_cpu_ptr(&memcg_stock);
2695  
2696  	/*
2697  	 * Save vmstat data in stock and skip vmstat array update unless
2698  	 * accumulating over a page of vmstat data or when pgdat or idx
2699  	 * changes.
2700  	 */
2701  	if (READ_ONCE(stock->cached_objcg) != objcg) {
2702  		old = drain_obj_stock(stock);
2703  		obj_cgroup_get(objcg);
2704  		stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes)
2705  				? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0;
2706  		WRITE_ONCE(stock->cached_objcg, objcg);
2707  		stock->cached_pgdat = pgdat;
2708  	} else if (stock->cached_pgdat != pgdat) {
2709  		/* Flush the existing cached vmstat data */
2710  		struct pglist_data *oldpg = stock->cached_pgdat;
2711  
2712  		if (stock->nr_slab_reclaimable_b) {
2713  			__mod_objcg_mlstate(objcg, oldpg, NR_SLAB_RECLAIMABLE_B,
2714  					  stock->nr_slab_reclaimable_b);
2715  			stock->nr_slab_reclaimable_b = 0;
2716  		}
2717  		if (stock->nr_slab_unreclaimable_b) {
2718  			__mod_objcg_mlstate(objcg, oldpg, NR_SLAB_UNRECLAIMABLE_B,
2719  					  stock->nr_slab_unreclaimable_b);
2720  			stock->nr_slab_unreclaimable_b = 0;
2721  		}
2722  		stock->cached_pgdat = pgdat;
2723  	}
2724  
2725  	bytes = (idx == NR_SLAB_RECLAIMABLE_B) ? &stock->nr_slab_reclaimable_b
2726  					       : &stock->nr_slab_unreclaimable_b;
2727  	/*
2728  	 * Even for a large object >= PAGE_SIZE, the vmstat data will still be
2729  	 * cached locally at least once before being pushed out.
2730  	 */
2731  	if (!*bytes) {
2732  		*bytes = nr;
2733  		nr = 0;
2734  	} else {
2735  		*bytes += nr;
2736  		if (abs(*bytes) > PAGE_SIZE) {
2737  			nr = *bytes;
2738  			*bytes = 0;
2739  		} else {
2740  			nr = 0;
2741  		}
2742  	}
2743  	if (nr)
2744  		__mod_objcg_mlstate(objcg, pgdat, idx, nr);
2745  
2746  	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
2747  	obj_cgroup_put(old);
2748  }
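/*
 * Example (added for illustration): uncharging three 1000-byte slab objects
 * on the same pgdat accumulates -3000 bytes in nr_slab_unreclaimable_b
 * without touching the vmstat counters; the cached value is only flushed
 * via __mod_objcg_mlstate() once its magnitude exceeds PAGE_SIZE, or when
 * the cached objcg/pgdat changes, or when the stock is drained.
 */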
2749  
2750  static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
2751  {
2752  	struct memcg_stock_pcp *stock;
2753  	unsigned long flags;
2754  	bool ret = false;
2755  
2756  	local_lock_irqsave(&memcg_stock.stock_lock, flags);
2757  
2758  	stock = this_cpu_ptr(&memcg_stock);
2759  	if (objcg == READ_ONCE(stock->cached_objcg) && stock->nr_bytes >= nr_bytes) {
2760  		stock->nr_bytes -= nr_bytes;
2761  		ret = true;
2762  	}
2763  
2764  	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
2765  
2766  	return ret;
2767  }
2768  
2769  static struct obj_cgroup *drain_obj_stock(struct memcg_stock_pcp *stock)
2770  {
2771  	struct obj_cgroup *old = READ_ONCE(stock->cached_objcg);
2772  
2773  	if (!old)
2774  		return NULL;
2775  
2776  	if (stock->nr_bytes) {
2777  		unsigned int nr_pages = stock->nr_bytes >> PAGE_SHIFT;
2778  		unsigned int nr_bytes = stock->nr_bytes & (PAGE_SIZE - 1);
2779  
2780  		if (nr_pages) {
2781  			struct mem_cgroup *memcg;
2782  
2783  			memcg = get_mem_cgroup_from_objcg(old);
2784  
2785  			mod_memcg_state(memcg, MEMCG_KMEM, -nr_pages);
2786  			memcg1_account_kmem(memcg, -nr_pages);
2787  			__refill_stock(memcg, nr_pages);
2788  
2789  			css_put(&memcg->css);
2790  		}
2791  
2792  		/*
2793  		 * The leftover is flushed to the centralized per-memcg value.
2794  		 * On the next attempt to refill obj stock it will be moved
2795  		 * to a per-cpu stock (probably on another CPU), see
2796  		 * refill_obj_stock().
2797  		 *
2798  		 * How often it's flushed is a trade-off between the memory
2799  		 * limit enforcement accuracy and potential CPU contention,
2800  		 * so it might be changed in the future.
2801  		 */
2802  		atomic_add(nr_bytes, &old->nr_charged_bytes);
2803  		stock->nr_bytes = 0;
2804  	}
2805  
2806  	/*
2807  	 * Flush the vmstat data in current stock
2808  	 */
2809  	if (stock->nr_slab_reclaimable_b || stock->nr_slab_unreclaimable_b) {
2810  		if (stock->nr_slab_reclaimable_b) {
2811  			__mod_objcg_mlstate(old, stock->cached_pgdat,
2812  					  NR_SLAB_RECLAIMABLE_B,
2813  					  stock->nr_slab_reclaimable_b);
2814  			stock->nr_slab_reclaimable_b = 0;
2815  		}
2816  		if (stock->nr_slab_unreclaimable_b) {
2817  			__mod_objcg_mlstate(old, stock->cached_pgdat,
2818  					  NR_SLAB_UNRECLAIMABLE_B,
2819  					  stock->nr_slab_unreclaimable_b);
2820  			stock->nr_slab_unreclaimable_b = 0;
2821  		}
2822  		stock->cached_pgdat = NULL;
2823  	}
2824  
2825  	WRITE_ONCE(stock->cached_objcg, NULL);
2826  	/*
2827  	 * The `old' objcg needs to be released by the caller via
2828  	 * obj_cgroup_put() outside of memcg_stock_pcp::stock_lock.
2829  	 */
2830  	return old;
2831  }
2832  
2833  static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
2834  				     struct mem_cgroup *root_memcg)
2835  {
2836  	struct obj_cgroup *objcg = READ_ONCE(stock->cached_objcg);
2837  	struct mem_cgroup *memcg;
2838  
2839  	if (objcg) {
2840  		memcg = obj_cgroup_memcg(objcg);
2841  		if (memcg && mem_cgroup_is_descendant(memcg, root_memcg))
2842  			return true;
2843  	}
2844  
2845  	return false;
2846  }
2847  
2848  static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes,
2849  			     bool allow_uncharge)
2850  {
2851  	struct memcg_stock_pcp *stock;
2852  	struct obj_cgroup *old = NULL;
2853  	unsigned long flags;
2854  	unsigned int nr_pages = 0;
2855  
2856  	local_lock_irqsave(&memcg_stock.stock_lock, flags);
2857  
2858  	stock = this_cpu_ptr(&memcg_stock);
2859  	if (READ_ONCE(stock->cached_objcg) != objcg) { /* reset if necessary */
2860  		old = drain_obj_stock(stock);
2861  		obj_cgroup_get(objcg);
2862  		WRITE_ONCE(stock->cached_objcg, objcg);
2863  		stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes)
2864  				? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0;
2865  		allow_uncharge = true;	/* Allow uncharge when objcg changes */
2866  	}
2867  	stock->nr_bytes += nr_bytes;
2868  
2869  	if (allow_uncharge && (stock->nr_bytes > PAGE_SIZE)) {
2870  		nr_pages = stock->nr_bytes >> PAGE_SHIFT;
2871  		stock->nr_bytes &= (PAGE_SIZE - 1);
2872  	}
2873  
2874  	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
2875  	obj_cgroup_put(old);
2876  
2877  	if (nr_pages)
2878  		obj_cgroup_uncharge_pages(objcg, nr_pages);
2879  }
2880  
2881  int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size)
2882  {
2883  	unsigned int nr_pages, nr_bytes;
2884  	int ret;
2885  
2886  	if (consume_obj_stock(objcg, size))
2887  		return 0;
2888  
2889  	/*
2890  	 * In theory, objcg->nr_charged_bytes can have enough
2891  	 * pre-charged bytes to satisfy the allocation. However,
2892  	 * flushing objcg->nr_charged_bytes requires two atomic
2893  	 * operations, and objcg->nr_charged_bytes can't be big.
2894  	 * The shared objcg->nr_charged_bytes can also become a
2895  	 * performance bottleneck if all tasks of the same memcg are
2896  	 * trying to update it. So it's better to ignore it and try to
2897  	 * grab some new pages. The stock's nr_bytes will be flushed to
2898  	 * objcg->nr_charged_bytes later on when objcg changes.
2899  	 *
2900  	 * The stock's nr_bytes may contain enough pre-charged bytes
2901  	 * to charge one less page, but we can't rely
2902  	 * on the pre-charged bytes not being changed outside of
2903  	 * consume_obj_stock() or refill_obj_stock(). So ignore those
2904  	 * pre-charged bytes as well when charging pages. To avoid a
2905  	 * page uncharge right after a page charge, we set the
2906  	 * allow_uncharge flag to false when calling refill_obj_stock()
2907  	 * to temporarily allow the pre-charged bytes to exceed the page
2908  	 * size limit. The maximum reachable value of the pre-charged
2909  	 * bytes is (sizeof(object) + PAGE_SIZE - 2) if there is no data
2910  	 * race.
2911  	 */
2912  	nr_pages = size >> PAGE_SHIFT;
2913  	nr_bytes = size & (PAGE_SIZE - 1);
2914  
2915  	if (nr_bytes)
2916  		nr_pages += 1;
2917  
2918  	ret = obj_cgroup_charge_pages(objcg, gfp, nr_pages);
2919  	if (!ret && nr_bytes)
2920  		refill_obj_stock(objcg, PAGE_SIZE - nr_bytes, false);
2921  
2922  	return ret;
2923  }
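/*
 * Worked example (added for illustration, assuming 4K pages): charging a
 * 700-byte object yields nr_pages = 0, nr_bytes = 700, so one full page is
 * charged and the remaining PAGE_SIZE - 700 = 3396 bytes are handed back to
 * the per-CPU objcg stock with allow_uncharge == false, ready to serve the
 * next sub-page charge without touching the page counters.
 */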
2924  
2925  void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size)
2926  {
2927  	refill_obj_stock(objcg, size, true);
2928  }
2929  
2930  static inline size_t obj_full_size(struct kmem_cache *s)
2931  {
2932  	/*
2933  	 * For each accounted object there is extra space which is used
2934  	 * to store the obj_cgroup membership pointer. Charge it too.
2935  	 */
2936  	return s->size + sizeof(struct obj_cgroup *);
2937  }
2938  
2939  bool __memcg_slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru,
2940  				  gfp_t flags, size_t size, void **p)
2941  {
2942  	struct obj_cgroup *objcg;
2943  	struct slab *slab;
2944  	unsigned long off;
2945  	size_t i;
2946  
2947  	/*
2948  	 * The obtained objcg pointer is safe to use within the current scope,
2949  	 * defined by current task or set_active_memcg() pair.
2950  	 * obj_cgroup_get() is used to get a permanent reference.
2951  	 */
2952  	objcg = current_obj_cgroup();
2953  	if (!objcg)
2954  		return true;
2955  
2956  	/*
2957  	 * slab_alloc_node() avoids the NULL check, so we might be called with a
2958  	 * single NULL object. kmem_cache_alloc_bulk() aborts if it can't fill
2959  	 * the whole requested size.
2960  	 * Return success as there's nothing to free back.
2961  	 */
2962  	if (unlikely(*p == NULL))
2963  		return true;
2964  
2965  	flags &= gfp_allowed_mask;
2966  
2967  	if (lru) {
2968  		int ret;
2969  		struct mem_cgroup *memcg;
2970  
2971  		memcg = get_mem_cgroup_from_objcg(objcg);
2972  		ret = memcg_list_lru_alloc(memcg, lru, flags);
2973  		css_put(&memcg->css);
2974  
2975  		if (ret)
2976  			return false;
2977  	}
2978  
2979  	if (obj_cgroup_charge(objcg, flags, size * obj_full_size(s)))
2980  		return false;
2981  
2982  	for (i = 0; i < size; i++) {
2983  		slab = virt_to_slab(p[i]);
2984  
2985  		if (!slab_obj_exts(slab) &&
2986  		    alloc_slab_obj_exts(slab, s, flags, false)) {
2987  			obj_cgroup_uncharge(objcg, obj_full_size(s));
2988  			continue;
2989  		}
2990  
2991  		off = obj_to_index(s, slab, p[i]);
2992  		obj_cgroup_get(objcg);
2993  		slab_obj_exts(slab)[off].objcg = objcg;
2994  		mod_objcg_state(objcg, slab_pgdat(slab),
2995  				cache_vmstat_idx(s), obj_full_size(s));
2996  	}
2997  
2998  	return true;
2999  }
3000  
3001  void __memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
3002  			    void **p, int objects, struct slabobj_ext *obj_exts)
3003  {
3004  	for (int i = 0; i < objects; i++) {
3005  		struct obj_cgroup *objcg;
3006  		unsigned int off;
3007  
3008  		off = obj_to_index(s, slab, p[i]);
3009  		objcg = obj_exts[off].objcg;
3010  		if (!objcg)
3011  			continue;
3012  
3013  		obj_exts[off].objcg = NULL;
3014  		obj_cgroup_uncharge(objcg, obj_full_size(s));
3015  		mod_objcg_state(objcg, slab_pgdat(slab), cache_vmstat_idx(s),
3016  				-obj_full_size(s));
3017  		obj_cgroup_put(objcg);
3018  	}
3019  }
3020  
3021  /*
3022   * Because memcg data is not set on tail pages, set it now.
3023   */
3024  void split_page_memcg(struct page *head, int old_order, int new_order)
3025  {
3026  	struct folio *folio = page_folio(head);
3027  	int i;
3028  	unsigned int old_nr = 1 << old_order;
3029  	unsigned int new_nr = 1 << new_order;
3030  
3031  	if (mem_cgroup_disabled() || !folio_memcg_charged(folio))
3032  		return;
3033  
3034  	for (i = new_nr; i < old_nr; i += new_nr)
3035  		folio_page(folio, i)->memcg_data = folio->memcg_data;
3036  
3037  	if (folio_memcg_kmem(folio))
3038  		obj_cgroup_get_many(__folio_objcg(folio), old_nr / new_nr - 1);
3039  	else
3040  		css_get_many(&folio_memcg(folio)->css, old_nr / new_nr - 1);
3041  }
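/*
 * Example (added for illustration): splitting an order-9 folio into order-0
 * pages copies memcg_data to the 511 tail pages and takes 511 extra
 * references (old_nr / new_nr - 1 = 512 / 1 - 1) on the objcg or the memcg
 * css, so every resulting page holds its own reference.
 */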
3042  
3043  unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
3044  {
3045  	unsigned long val;
3046  
3047  	if (mem_cgroup_is_root(memcg)) {
3048  		/*
3049  		 * Approximate root's usage from global state. This isn't
3050  		 * perfect, but the root usage was always an approximation.
3051  		 */
3052  		val = global_node_page_state(NR_FILE_PAGES) +
3053  			global_node_page_state(NR_ANON_MAPPED);
3054  		if (swap)
3055  			val += total_swap_pages - get_nr_swap_pages();
3056  	} else {
3057  		if (!swap)
3058  			val = page_counter_read(&memcg->memory);
3059  		else
3060  			val = page_counter_read(&memcg->memsw);
3061  	}
3062  	return val;
3063  }
3064  
3065  static int memcg_online_kmem(struct mem_cgroup *memcg)
3066  {
3067  	struct obj_cgroup *objcg;
3068  
3069  	if (mem_cgroup_kmem_disabled())
3070  		return 0;
3071  
3072  	if (unlikely(mem_cgroup_is_root(memcg)))
3073  		return 0;
3074  
3075  	objcg = obj_cgroup_alloc();
3076  	if (!objcg)
3077  		return -ENOMEM;
3078  
3079  	objcg->memcg = memcg;
3080  	rcu_assign_pointer(memcg->objcg, objcg);
3081  	obj_cgroup_get(objcg);
3082  	memcg->orig_objcg = objcg;
3083  
3084  	static_branch_enable(&memcg_kmem_online_key);
3085  
3086  	memcg->kmemcg_id = memcg->id.id;
3087  
3088  	return 0;
3089  }
3090  
3091  static void memcg_offline_kmem(struct mem_cgroup *memcg)
3092  {
3093  	struct mem_cgroup *parent;
3094  
3095  	if (mem_cgroup_kmem_disabled())
3096  		return;
3097  
3098  	if (unlikely(mem_cgroup_is_root(memcg)))
3099  		return;
3100  
3101  	parent = parent_mem_cgroup(memcg);
3102  	if (!parent)
3103  		parent = root_mem_cgroup;
3104  
3105  	memcg_reparent_objcgs(memcg, parent);
3106  
3107  	/*
3108  	 * After we have finished memcg_reparent_objcgs(), all list_lrus
3109  	 * corresponding to this cgroup are guaranteed to remain empty.
3110  	 * The ordering is imposed by list_lru_node->lock taken by
3111  	 * memcg_reparent_list_lrus().
3112  	 */
3113  	memcg_reparent_list_lrus(memcg, parent);
3114  }
3115  
3116  #ifdef CONFIG_CGROUP_WRITEBACK
3117  
3118  #include <trace/events/writeback.h>
3119  
3120  static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
3121  {
3122  	return wb_domain_init(&memcg->cgwb_domain, gfp);
3123  }
3124  
3125  static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
3126  {
3127  	wb_domain_exit(&memcg->cgwb_domain);
3128  }
3129  
3130  static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
3131  {
3132  	wb_domain_size_changed(&memcg->cgwb_domain);
3133  }
3134  
3135  struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
3136  {
3137  	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
3138  
3139  	if (!memcg->css.parent)
3140  		return NULL;
3141  
3142  	return &memcg->cgwb_domain;
3143  }
3144  
3145  /**
3146   * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg
3147   * @wb: bdi_writeback in question
3148   * @pfilepages: out parameter for number of file pages
3149   * @pheadroom: out parameter for number of allocatable pages according to memcg
3150   * @pdirty: out parameter for number of dirty pages
3151   * @pwriteback: out parameter for number of pages under writeback
3152   *
3153   * Determine the numbers of file, headroom, dirty, and writeback pages in
3154   * @wb's memcg.  File, dirty and writeback are self-explanatory.  Headroom
3155   * is a bit more involved.
3156   *
3157   * A memcg's headroom is "min(max, high) - used".  In the hierarchy, the
3158   * headroom is calculated as the lowest headroom of itself and the
3159   * ancestors.  Note that this doesn't consider the actual amount of
3160   * available memory in the system.  The caller should further cap
3161   * *@pheadroom accordingly.
3162   */
3163  void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
3164  			 unsigned long *pheadroom, unsigned long *pdirty,
3165  			 unsigned long *pwriteback)
3166  {
3167  	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
3168  	struct mem_cgroup *parent;
3169  
3170  	mem_cgroup_flush_stats_ratelimited(memcg);
3171  
3172  	*pdirty = memcg_page_state(memcg, NR_FILE_DIRTY);
3173  	*pwriteback = memcg_page_state(memcg, NR_WRITEBACK);
3174  	*pfilepages = memcg_page_state(memcg, NR_INACTIVE_FILE) +
3175  			memcg_page_state(memcg, NR_ACTIVE_FILE);
3176  
3177  	*pheadroom = PAGE_COUNTER_MAX;
3178  	while ((parent = parent_mem_cgroup(memcg))) {
3179  		unsigned long ceiling = min(READ_ONCE(memcg->memory.max),
3180  					    READ_ONCE(memcg->memory.high));
3181  		unsigned long used = page_counter_read(&memcg->memory);
3182  
3183  		*pheadroom = min(*pheadroom, ceiling - min(ceiling, used));
3184  		memcg = parent;
3185  	}
3186  }
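/*
 * Worked example (added for illustration): a memcg with memory.max = 1G,
 * memory.high = 800M and 600M in use contributes a ceiling of
 * min(1G, 800M) = 800M and a headroom of 800M - 600M = 200M. Walking up,
 * *pheadroom ends up as the smallest such value among the memcg and its
 * ancestors, clamped to 0 at any level that is already over its ceiling.
 */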
3187  
3188  /*
3189   * Foreign dirty flushing
3190   *
3191   * There's an inherent mismatch between memcg and writeback.  The former
3192   * tracks ownership per-page while the latter per-inode.  This was a
3193   * deliberate design decision because honoring per-page ownership in the
3194   * writeback path is complicated, may lead to higher CPU and IO overheads
3195   * and deemed unnecessary given that write-sharing an inode across
3196   * different cgroups isn't a common use-case.
3197   *
3198   * Combined with inode majority-writer ownership switching, this works well
3199   * enough in most cases but there are some pathological cases.  For
3200   * example, let's say there are two cgroups A and B which keep writing to
3201   * different but confined parts of the same inode.  B owns the inode and
3202   * A's memory is limited far below B's.  A's dirty ratio can rise enough to
3203   * trigger balance_dirty_pages() sleeps but B's can be low enough to avoid
3204   * triggering background writeback.  A will be slowed down without a way to
3205   * make writeback of the dirty pages happen.
3206   *
3207   * Conditions like the above can lead to a cgroup getting repeatedly and
3208   * severely throttled after making some progress after each
3209   * dirty_expire_interval while the underlying IO device is almost
3210   * completely idle.
3211   *
3212   * Solving this problem completely requires matching the ownership tracking
3213   * granularities between memcg and writeback in either direction.  However,
3214   * the more egregious behaviors can be avoided by simply remembering the
3215   * most recent foreign dirtying events and initiating remote flushes on
3216   * them when local writeback isn't enough to keep the memory clean enough.
3217   *
3218   * The following two functions implement such mechanism.  When a foreign
3219   * page - a page whose memcg and writeback ownerships don't match - is
3220   * dirtied, mem_cgroup_track_foreign_dirty() records the inode owning
3221   * bdi_writeback on the page owning memcg.  When balance_dirty_pages()
3222   * decides that the memcg needs to sleep due to high dirty ratio, it calls
3223   * mem_cgroup_flush_foreign() which queues writeback on the recorded
3224   * foreign bdi_writebacks which haven't expired.  Both the numbers of
3225   * recorded bdi_writebacks and concurrent in-flight foreign writebacks are
3226   * limited to MEMCG_CGWB_FRN_CNT.
3227   *
3228   * The mechanism only remembers IDs and doesn't hold any object references.
3229   * As being wrong occasionally doesn't matter, updates and accesses to the
3230   * records are lockless and racy.
3231   */
3232  void mem_cgroup_track_foreign_dirty_slowpath(struct folio *folio,
3233  					     struct bdi_writeback *wb)
3234  {
3235  	struct mem_cgroup *memcg = folio_memcg(folio);
3236  	struct memcg_cgwb_frn *frn;
3237  	u64 now = get_jiffies_64();
3238  	u64 oldest_at = now;
3239  	int oldest = -1;
3240  	int i;
3241  
3242  	trace_track_foreign_dirty(folio, wb);
3243  
3244  	/*
3245  	 * Pick the slot to use.  If there is already a slot for @wb, keep
3246  	 * using it.  If not, replace the oldest one which isn't being
3247  	 * written out.
3248  	 */
3249  	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
3250  		frn = &memcg->cgwb_frn[i];
3251  		if (frn->bdi_id == wb->bdi->id &&
3252  		    frn->memcg_id == wb->memcg_css->id)
3253  			break;
3254  		if (time_before64(frn->at, oldest_at) &&
3255  		    atomic_read(&frn->done.cnt) == 1) {
3256  			oldest = i;
3257  			oldest_at = frn->at;
3258  		}
3259  	}
3260  
3261  	if (i < MEMCG_CGWB_FRN_CNT) {
3262  		/*
3263  		 * Re-using an existing one.  Update timestamp lazily to
3264  		 * avoid making the cacheline hot.  We want them to be
3265  		 * reasonably up-to-date and significantly shorter than
3266  		 * dirty_expire_interval as that's what expires the record.
3267  		 * Use the shorter of 1s and dirty_expire_interval / 8.
3268  		 */
3269  		unsigned long update_intv =
3270  			min_t(unsigned long, HZ,
3271  			      msecs_to_jiffies(dirty_expire_interval * 10) / 8);
3272  
3273  		if (time_before64(frn->at, now - update_intv))
3274  			frn->at = now;
3275  	} else if (oldest >= 0) {
3276  		/* replace the oldest free one */
3277  		frn = &memcg->cgwb_frn[oldest];
3278  		frn->bdi_id = wb->bdi->id;
3279  		frn->memcg_id = wb->memcg_css->id;
3280  		frn->at = now;
3281  	}
3282  }
3283  
3284  /* issue foreign writeback flushes for recorded foreign dirtying events */
3285  void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
3286  {
3287  	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
3288  	unsigned long intv = msecs_to_jiffies(dirty_expire_interval * 10);
3289  	u64 now = jiffies_64;
3290  	int i;
3291  
3292  	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
3293  		struct memcg_cgwb_frn *frn = &memcg->cgwb_frn[i];
3294  
3295  		/*
3296  		 * If the record is older than dirty_expire_interval,
3297  		 * writeback on it has already started.  No need to kick it
3298  		 * off again.  Also, don't start a new one if there's
3299  		 * already one in flight.
3300  		 */
3301  		if (time_after64(frn->at, now - intv) &&
3302  		    atomic_read(&frn->done.cnt) == 1) {
3303  			frn->at = 0;
3304  			trace_flush_foreign(wb, frn->bdi_id, frn->memcg_id);
3305  			cgroup_writeback_by_id(frn->bdi_id, frn->memcg_id,
3306  					       WB_REASON_FOREIGN_FLUSH,
3307  					       &frn->done);
3308  		}
3309  	}
3310  }
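
/*
 * Editor's illustration (not part of the original file): the slowpath above
 * is only worth calling for genuinely foreign dirtying, so a caller on the
 * dirtying side would gate it on a memcg/writeback ownership mismatch,
 * roughly as sketched below.  The helper name is hypothetical; the real
 * gating lives in the mem_cgroup_track_foreign_dirty() wrapper.
 */
static void __maybe_unused example_note_dirtied_folio(struct folio *folio,
						      struct bdi_writeback *wb)
{
	/* Same-owner dirtying is the common case and needs no record. */
	if (&folio_memcg(folio)->css == wb->memcg_css)
		return;

	mem_cgroup_track_foreign_dirty_slowpath(folio, wb);
}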
3311  
3312  #else	/* CONFIG_CGROUP_WRITEBACK */
3313  
3314  static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
3315  {
3316  	return 0;
3317  }
3318  
3319  static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
3320  {
3321  }
3322  
3323  static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
3324  {
3325  }
3326  
3327  #endif	/* CONFIG_CGROUP_WRITEBACK */
3328  
3329  /*
3330   * Private memory cgroup IDR
3331   *
3332   * Swap-out records and page cache shadow entries need to store memcg
3333   * references in constrained space, so we maintain an ID space that is
3334   * limited to 16 bit (MEM_CGROUP_ID_MAX), limiting the total number of
3335   * memory-controlled cgroups to 64k.
3336   *
3337   * However, there usually are many references to the offline CSS after
3338   * the cgroup has been destroyed, such as page cache or reclaimable
3339   * slab objects, that don't need to hang on to the ID. We want to keep
3340   * those dead CSS from occupying IDs, or we might quickly exhaust the
3341   * relatively small ID space and prevent the creation of new cgroups
3342   * even when there are much fewer than 64k cgroups - possibly none.
3343   *
3344   * Maintain a private 16-bit ID space for memcg, and allow the ID to
3345   * be freed and recycled when it's no longer needed, which is usually
3346   * when the CSS is offlined.
3347   *
3348   * The only exception is records of swapped-out tmpfs/shmem
3349   * pages that need to be attributed to live ancestors on swapin. But
3350   * those references are manageable from userspace.
3351   */
3352  
3353  #define MEM_CGROUP_ID_MAX	((1UL << MEM_CGROUP_ID_SHIFT) - 1)
3354  static DEFINE_XARRAY_ALLOC1(mem_cgroup_ids);
3355  
3356  static void mem_cgroup_id_remove(struct mem_cgroup *memcg)
3357  {
3358  	if (memcg->id.id > 0) {
3359  		xa_erase(&mem_cgroup_ids, memcg->id.id);
3360  		memcg->id.id = 0;
3361  	}
3362  }
3363  
3364  void __maybe_unused mem_cgroup_id_get_many(struct mem_cgroup *memcg,
3365  					   unsigned int n)
3366  {
3367  	refcount_add(n, &memcg->id.ref);
3368  }
3369  
3370  void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n)
3371  {
3372  	if (refcount_sub_and_test(n, &memcg->id.ref)) {
3373  		mem_cgroup_id_remove(memcg);
3374  
3375  		/* Memcg ID pins CSS */
3376  		css_put(&memcg->css);
3377  	}
3378  }
3379  
3380  static inline void mem_cgroup_id_put(struct mem_cgroup *memcg)
3381  {
3382  	mem_cgroup_id_put_many(memcg, 1);
3383  }
3384  
3385  /**
3386   * mem_cgroup_from_id - look up a memcg from a memcg id
3387   * @id: the memcg id to look up
3388   *
3389   * Caller must hold rcu_read_lock().
3390   */
3391  struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
3392  {
3393  	WARN_ON_ONCE(!rcu_read_lock_held());
3394  	return xa_load(&mem_cgroup_ids, id);
3395  }
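
/*
 * Editor's sketch (not in the original source): a typical consumer of the
 * 16-bit ID space, e.g. a swap or shadow record, resolves the ID back to a
 * memcg under RCU and pins the css before using it, since the ID can be
 * freed once the cgroup goes offline.  The helper name is hypothetical.
 */
static struct mem_cgroup * __maybe_unused
example_memcg_from_record(unsigned short id)
{
	struct mem_cgroup *memcg;

	rcu_read_lock();
	memcg = mem_cgroup_from_id(id);
	if (memcg && !css_tryget_online(&memcg->css))
		memcg = NULL;	/* raced with offlining; treat as gone */
	rcu_read_unlock();

	return memcg;
}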
3396  
3397  #ifdef CONFIG_SHRINKER_DEBUG
3398  struct mem_cgroup *mem_cgroup_get_from_ino(unsigned long ino)
3399  {
3400  	struct cgroup *cgrp;
3401  	struct cgroup_subsys_state *css;
3402  	struct mem_cgroup *memcg;
3403  
3404  	cgrp = cgroup_get_from_id(ino);
3405  	if (IS_ERR(cgrp))
3406  		return ERR_CAST(cgrp);
3407  
3408  	css = cgroup_get_e_css(cgrp, &memory_cgrp_subsys);
3409  	if (css)
3410  		memcg = container_of(css, struct mem_cgroup, css);
3411  	else
3412  		memcg = ERR_PTR(-ENOENT);
3413  
3414  	cgroup_put(cgrp);
3415  
3416  	return memcg;
3417  }
3418  #endif
3419  
3420  static bool alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
3421  {
3422  	struct mem_cgroup_per_node *pn;
3423  
3424  	pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, node);
3425  	if (!pn)
3426  		return false;
3427  
3428  	pn->lruvec_stats = kzalloc_node(sizeof(struct lruvec_stats),
3429  					GFP_KERNEL_ACCOUNT, node);
3430  	if (!pn->lruvec_stats)
3431  		goto fail;
3432  
3433  	pn->lruvec_stats_percpu = alloc_percpu_gfp(struct lruvec_stats_percpu,
3434  						   GFP_KERNEL_ACCOUNT);
3435  	if (!pn->lruvec_stats_percpu)
3436  		goto fail;
3437  
3438  	lruvec_init(&pn->lruvec);
3439  	pn->memcg = memcg;
3440  
3441  	memcg->nodeinfo[node] = pn;
3442  	return true;
3443  fail:
3444  	kfree(pn->lruvec_stats);
3445  	kfree(pn);
3446  	return false;
3447  }
3448  
3449  static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
3450  {
3451  	struct mem_cgroup_per_node *pn = memcg->nodeinfo[node];
3452  
3453  	if (!pn)
3454  		return;
3455  
3456  	free_percpu(pn->lruvec_stats_percpu);
3457  	kfree(pn->lruvec_stats);
3458  	kfree(pn);
3459  }
3460  
3461  static void __mem_cgroup_free(struct mem_cgroup *memcg)
3462  {
3463  	int node;
3464  
3465  	obj_cgroup_put(memcg->orig_objcg);
3466  
3467  	for_each_node(node)
3468  		free_mem_cgroup_per_node_info(memcg, node);
3469  	memcg1_free_events(memcg);
3470  	kfree(memcg->vmstats);
3471  	free_percpu(memcg->vmstats_percpu);
3472  	kfree(memcg);
3473  }
3474  
3475  static void mem_cgroup_free(struct mem_cgroup *memcg)
3476  {
3477  	lru_gen_exit_memcg(memcg);
3478  	memcg_wb_domain_exit(memcg);
3479  	__mem_cgroup_free(memcg);
3480  }
3481  
3482  static struct mem_cgroup *mem_cgroup_alloc(struct mem_cgroup *parent)
3483  {
3484  	struct memcg_vmstats_percpu *statc, *pstatc;
3485  	struct mem_cgroup *memcg;
3486  	int node, cpu;
3487  	int __maybe_unused i;
3488  	long error;
3489  
3490  	memcg = kzalloc(struct_size(memcg, nodeinfo, nr_node_ids), GFP_KERNEL);
3491  	if (!memcg)
3492  		return ERR_PTR(-ENOMEM);
3493  
3494  	error = xa_alloc(&mem_cgroup_ids, &memcg->id.id, NULL,
3495  			 XA_LIMIT(1, MEM_CGROUP_ID_MAX), GFP_KERNEL);
3496  	if (error)
3497  		goto fail;
3498  	error = -ENOMEM;
3499  
3500  	memcg->vmstats = kzalloc(sizeof(struct memcg_vmstats),
3501  				 GFP_KERNEL_ACCOUNT);
3502  	if (!memcg->vmstats)
3503  		goto fail;
3504  
3505  	memcg->vmstats_percpu = alloc_percpu_gfp(struct memcg_vmstats_percpu,
3506  						 GFP_KERNEL_ACCOUNT);
3507  	if (!memcg->vmstats_percpu)
3508  		goto fail;
3509  
3510  	if (!memcg1_alloc_events(memcg))
3511  		goto fail;
3512  
3513  	for_each_possible_cpu(cpu) {
3514  		if (parent)
3515  			pstatc = per_cpu_ptr(parent->vmstats_percpu, cpu);
3516  		statc = per_cpu_ptr(memcg->vmstats_percpu, cpu);
3517  		statc->parent = parent ? pstatc : NULL;
3518  		statc->vmstats = memcg->vmstats;
3519  	}
3520  
3521  	for_each_node(node)
3522  		if (!alloc_mem_cgroup_per_node_info(memcg, node))
3523  			goto fail;
3524  
3525  	if (memcg_wb_domain_init(memcg, GFP_KERNEL))
3526  		goto fail;
3527  
3528  	INIT_WORK(&memcg->high_work, high_work_func);
3529  	vmpressure_init(&memcg->vmpressure);
3530  	INIT_LIST_HEAD(&memcg->memory_peaks);
3531  	INIT_LIST_HEAD(&memcg->swap_peaks);
3532  	spin_lock_init(&memcg->peaks_lock);
3533  	memcg->socket_pressure = jiffies;
3534  	memcg1_memcg_init(memcg);
3535  	memcg->kmemcg_id = -1;
3536  	INIT_LIST_HEAD(&memcg->objcg_list);
3537  #ifdef CONFIG_CGROUP_WRITEBACK
3538  	INIT_LIST_HEAD(&memcg->cgwb_list);
3539  	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
3540  		memcg->cgwb_frn[i].done =
3541  			__WB_COMPLETION_INIT(&memcg_cgwb_frn_waitq);
3542  #endif
3543  #ifdef CONFIG_TRANSPARENT_HUGEPAGE
3544  	spin_lock_init(&memcg->deferred_split_queue.split_queue_lock);
3545  	INIT_LIST_HEAD(&memcg->deferred_split_queue.split_queue);
3546  	memcg->deferred_split_queue.split_queue_len = 0;
3547  #endif
3548  	lru_gen_init_memcg(memcg);
3549  	return memcg;
3550  fail:
3551  	mem_cgroup_id_remove(memcg);
3552  	__mem_cgroup_free(memcg);
3553  	return ERR_PTR(error);
3554  }
3555  
3556  static struct cgroup_subsys_state * __ref
3557  mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
3558  {
3559  	struct mem_cgroup *parent = mem_cgroup_from_css(parent_css);
3560  	struct mem_cgroup *memcg, *old_memcg;
3561  
3562  	old_memcg = set_active_memcg(parent);
3563  	memcg = mem_cgroup_alloc(parent);
3564  	set_active_memcg(old_memcg);
3565  	if (IS_ERR(memcg))
3566  		return ERR_CAST(memcg);
3567  
3568  	page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
3569  	memcg1_soft_limit_reset(memcg);
3570  #ifdef CONFIG_ZSWAP
3571  	memcg->zswap_max = PAGE_COUNTER_MAX;
3572  	WRITE_ONCE(memcg->zswap_writeback, true);
3573  #endif
3574  	page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
3575  	if (parent) {
3576  		WRITE_ONCE(memcg->swappiness, mem_cgroup_swappiness(parent));
3577  
3578  		page_counter_init(&memcg->memory, &parent->memory, true);
3579  		page_counter_init(&memcg->swap, &parent->swap, false);
3580  #ifdef CONFIG_MEMCG_V1
3581  		WRITE_ONCE(memcg->oom_kill_disable, READ_ONCE(parent->oom_kill_disable));
3582  		page_counter_init(&memcg->kmem, &parent->kmem, false);
3583  		page_counter_init(&memcg->tcpmem, &parent->tcpmem, false);
3584  #endif
3585  	} else {
3586  		init_memcg_stats();
3587  		init_memcg_events();
3588  		page_counter_init(&memcg->memory, NULL, true);
3589  		page_counter_init(&memcg->swap, NULL, false);
3590  #ifdef CONFIG_MEMCG_V1
3591  		page_counter_init(&memcg->kmem, NULL, false);
3592  		page_counter_init(&memcg->tcpmem, NULL, false);
3593  #endif
3594  		root_mem_cgroup = memcg;
3595  		return &memcg->css;
3596  	}
3597  
3598  	if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
3599  		static_branch_inc(&memcg_sockets_enabled_key);
3600  
3601  	if (!cgroup_memory_nobpf)
3602  		static_branch_inc(&memcg_bpf_enabled_key);
3603  
3604  	return &memcg->css;
3605  }
3606  
3607  static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
3608  {
3609  	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3610  
3611  	if (memcg_online_kmem(memcg))
3612  		goto remove_id;
3613  
3614  	/*
3615  	 * A memcg must be visible for expand_shrinker_info()
3616  	 * by the time the maps are allocated. So, we allocate maps
3617  	 * here, when for_each_mem_cgroup() can't skip it.
3618  	 */
3619  	if (alloc_shrinker_info(memcg))
3620  		goto offline_kmem;
3621  
3622  	if (unlikely(mem_cgroup_is_root(memcg)) && !mem_cgroup_disabled())
3623  		queue_delayed_work(system_unbound_wq, &stats_flush_dwork,
3624  				   FLUSH_TIME);
3625  	lru_gen_online_memcg(memcg);
3626  
3627  	/* Online state pins memcg ID, memcg ID pins CSS */
3628  	refcount_set(&memcg->id.ref, 1);
3629  	css_get(css);
3630  
3631  	/*
3632  	 * Ensure mem_cgroup_from_id() works once we're fully online.
3633  	 *
3634  	 * We could do this earlier and require callers to filter with
3635  	 * css_tryget_online(). But right now there are no users that
3636  	 * need earlier access, and the workingset code relies on the
3637  	 * cgroup tree linkage (mem_cgroup_get_nr_swap_pages()). So
3638  	 * publish it here at the end of onlining. This matches the
3639  	 * regular ID destruction during offlining.
3640  	 */
3641  	xa_store(&mem_cgroup_ids, memcg->id.id, memcg, GFP_KERNEL);
3642  
3643  	return 0;
3644  offline_kmem:
3645  	memcg_offline_kmem(memcg);
3646  remove_id:
3647  	mem_cgroup_id_remove(memcg);
3648  	return -ENOMEM;
3649  }
3650  
3651  static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
3652  {
3653  	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3654  
3655  	memcg1_css_offline(memcg);
3656  
3657  	page_counter_set_min(&memcg->memory, 0);
3658  	page_counter_set_low(&memcg->memory, 0);
3659  
3660  	zswap_memcg_offline_cleanup(memcg);
3661  
3662  	memcg_offline_kmem(memcg);
3663  	reparent_shrinker_deferred(memcg);
3664  	wb_memcg_offline(memcg);
3665  	lru_gen_offline_memcg(memcg);
3666  
3667  	drain_all_stock(memcg);
3668  
3669  	mem_cgroup_id_put(memcg);
3670  }
3671  
3672  static void mem_cgroup_css_released(struct cgroup_subsys_state *css)
3673  {
3674  	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3675  
3676  	invalidate_reclaim_iterators(memcg);
3677  	lru_gen_release_memcg(memcg);
3678  }
3679  
3680  static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
3681  {
3682  	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3683  	int __maybe_unused i;
3684  
3685  #ifdef CONFIG_CGROUP_WRITEBACK
3686  	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
3687  		wb_wait_for_completion(&memcg->cgwb_frn[i].done);
3688  #endif
3689  	if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
3690  		static_branch_dec(&memcg_sockets_enabled_key);
3691  
3692  	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg1_tcpmem_active(memcg))
3693  		static_branch_dec(&memcg_sockets_enabled_key);
3694  
3695  	if (!cgroup_memory_nobpf)
3696  		static_branch_dec(&memcg_bpf_enabled_key);
3697  
3698  	vmpressure_cleanup(&memcg->vmpressure);
3699  	cancel_work_sync(&memcg->high_work);
3700  	memcg1_remove_from_trees(memcg);
3701  	free_shrinker_info(memcg);
3702  	mem_cgroup_free(memcg);
3703  }
3704  
3705  /**
3706   * mem_cgroup_css_reset - reset the states of a mem_cgroup
3707   * @css: the target css
3708   *
3709   * Reset the states of the mem_cgroup associated with @css.  This is
3710   * invoked when the userland requests disabling on the default hierarchy
3711   * but the memcg is pinned through dependency.  The memcg should stop
3712   * applying policies and should revert to the vanilla state as it may be
3713   * made visible again.
3714   *
3715   * The current implementation only resets the essential configurations.
3716   * This needs to be expanded to cover all the visible parts.
3717   */
3718  static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
3719  {
3720  	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3721  
3722  	page_counter_set_max(&memcg->memory, PAGE_COUNTER_MAX);
3723  	page_counter_set_max(&memcg->swap, PAGE_COUNTER_MAX);
3724  #ifdef CONFIG_MEMCG_V1
3725  	page_counter_set_max(&memcg->kmem, PAGE_COUNTER_MAX);
3726  	page_counter_set_max(&memcg->tcpmem, PAGE_COUNTER_MAX);
3727  #endif
3728  	page_counter_set_min(&memcg->memory, 0);
3729  	page_counter_set_low(&memcg->memory, 0);
3730  	page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
3731  	memcg1_soft_limit_reset(memcg);
3732  	page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
3733  	memcg_wb_domain_size_changed(memcg);
3734  }
3735  
3736  static void mem_cgroup_css_rstat_flush(struct cgroup_subsys_state *css, int cpu)
3737  {
3738  	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3739  	struct mem_cgroup *parent = parent_mem_cgroup(memcg);
3740  	struct memcg_vmstats_percpu *statc;
3741  	long delta, delta_cpu, v;
3742  	int i, nid;
3743  
3744  	statc = per_cpu_ptr(memcg->vmstats_percpu, cpu);
3745  
3746  	for (i = 0; i < MEMCG_VMSTAT_SIZE; i++) {
3747  		/*
3748  		 * Collect the aggregated propagation counts of groups
3749  		 * below us. We're in a per-cpu loop here and this is
3750  		 * a global counter, so the first cycle will get them.
3751  		 */
3752  		delta = memcg->vmstats->state_pending[i];
3753  		if (delta)
3754  			memcg->vmstats->state_pending[i] = 0;
3755  
3756  		/* Add CPU changes on this level since the last flush */
3757  		delta_cpu = 0;
3758  		v = READ_ONCE(statc->state[i]);
3759  		if (v != statc->state_prev[i]) {
3760  			delta_cpu = v - statc->state_prev[i];
3761  			delta += delta_cpu;
3762  			statc->state_prev[i] = v;
3763  		}
3764  
3765  		/* Aggregate counts on this level and propagate upwards */
3766  		if (delta_cpu)
3767  			memcg->vmstats->state_local[i] += delta_cpu;
3768  
3769  		if (delta) {
3770  			memcg->vmstats->state[i] += delta;
3771  			if (parent)
3772  				parent->vmstats->state_pending[i] += delta;
3773  		}
3774  	}
3775  
3776  	for (i = 0; i < NR_MEMCG_EVENTS; i++) {
3777  		delta = memcg->vmstats->events_pending[i];
3778  		if (delta)
3779  			memcg->vmstats->events_pending[i] = 0;
3780  
3781  		delta_cpu = 0;
3782  		v = READ_ONCE(statc->events[i]);
3783  		if (v != statc->events_prev[i]) {
3784  			delta_cpu = v - statc->events_prev[i];
3785  			delta += delta_cpu;
3786  			statc->events_prev[i] = v;
3787  		}
3788  
3789  		if (delta_cpu)
3790  			memcg->vmstats->events_local[i] += delta_cpu;
3791  
3792  		if (delta) {
3793  			memcg->vmstats->events[i] += delta;
3794  			if (parent)
3795  				parent->vmstats->events_pending[i] += delta;
3796  		}
3797  	}
3798  
3799  	for_each_node_state(nid, N_MEMORY) {
3800  		struct mem_cgroup_per_node *pn = memcg->nodeinfo[nid];
3801  		struct lruvec_stats *lstats = pn->lruvec_stats;
3802  		struct lruvec_stats *plstats = NULL;
3803  		struct lruvec_stats_percpu *lstatc;
3804  
3805  		if (parent)
3806  			plstats = parent->nodeinfo[nid]->lruvec_stats;
3807  
3808  		lstatc = per_cpu_ptr(pn->lruvec_stats_percpu, cpu);
3809  
3810  		for (i = 0; i < NR_MEMCG_NODE_STAT_ITEMS; i++) {
3811  			delta = lstats->state_pending[i];
3812  			if (delta)
3813  				lstats->state_pending[i] = 0;
3814  
3815  			delta_cpu = 0;
3816  			v = READ_ONCE(lstatc->state[i]);
3817  			if (v != lstatc->state_prev[i]) {
3818  				delta_cpu = v - lstatc->state_prev[i];
3819  				delta += delta_cpu;
3820  				lstatc->state_prev[i] = v;
3821  			}
3822  
3823  			if (delta_cpu)
3824  				lstats->state_local[i] += delta_cpu;
3825  
3826  			if (delta) {
3827  				lstats->state[i] += delta;
3828  				if (plstats)
3829  					plstats->state_pending[i] += delta;
3830  			}
3831  		}
3832  	}
3833  	WRITE_ONCE(statc->stats_updates, 0);
3834  	/* We are in a per-cpu loop here, only do the atomic write once */
3835  	if (atomic64_read(&memcg->vmstats->stats_updates))
3836  		atomic64_set(&memcg->vmstats->stats_updates, 0);
3837  }
3838  
3839  static void mem_cgroup_fork(struct task_struct *task)
3840  {
3841  	/*
3842  	 * Set the update flag to cause task->objcg to be initialized lazily
3843  	 * on the first allocation. It can be done without any synchronization
3844  	 * because it's always performed on the current task, as is
3845  	 * current_objcg_update().
3846  	 */
3847  	task->objcg = (struct obj_cgroup *)CURRENT_OBJCG_UPDATE_FLAG;
3848  }
3849  
3850  static void mem_cgroup_exit(struct task_struct *task)
3851  {
3852  	struct obj_cgroup *objcg = task->objcg;
3853  
3854  	objcg = (struct obj_cgroup *)
3855  		((unsigned long)objcg & ~CURRENT_OBJCG_UPDATE_FLAG);
3856  	obj_cgroup_put(objcg);
3857  
3858  	/*
3859  	 * Some kernel allocations can happen after this point,
3860  	 * but let's ignore them. It can be done without any synchronization
3861  	 * because it's always performed on the current task, as is
3862  	 * current_objcg_update().
3863  	 */
3864  	task->objcg = NULL;
3865  }
3866  
3867  #ifdef CONFIG_LRU_GEN
3868  static void mem_cgroup_lru_gen_attach(struct cgroup_taskset *tset)
3869  {
3870  	struct task_struct *task;
3871  	struct cgroup_subsys_state *css;
3872  
3873  	/* find the first leader if there is any */
3874  	cgroup_taskset_for_each_leader(task, css, tset)
3875  		break;
3876  
3877  	if (!task)
3878  		return;
3879  
3880  	task_lock(task);
3881  	if (task->mm && READ_ONCE(task->mm->owner) == task)
3882  		lru_gen_migrate_mm(task->mm);
3883  	task_unlock(task);
3884  }
3885  #else
3886  static void mem_cgroup_lru_gen_attach(struct cgroup_taskset *tset) {}
3887  #endif /* CONFIG_LRU_GEN */
3888  
3889  static void mem_cgroup_kmem_attach(struct cgroup_taskset *tset)
3890  {
3891  	struct task_struct *task;
3892  	struct cgroup_subsys_state *css;
3893  
3894  	cgroup_taskset_for_each(task, css, tset) {
3895  		/* atomically set the update bit */
3896  		set_bit(CURRENT_OBJCG_UPDATE_BIT, (unsigned long *)&task->objcg);
3897  	}
3898  }
3899  
3900  static void mem_cgroup_attach(struct cgroup_taskset *tset)
3901  {
3902  	mem_cgroup_lru_gen_attach(tset);
3903  	mem_cgroup_kmem_attach(tset);
3904  }
3905  
3906  static int seq_puts_memcg_tunable(struct seq_file *m, unsigned long value)
3907  {
3908  	if (value == PAGE_COUNTER_MAX)
3909  		seq_puts(m, "max\n");
3910  	else
3911  		seq_printf(m, "%llu\n", (u64)value * PAGE_SIZE);
3912  
3913  	return 0;
3914  }
3915  
3916  static u64 memory_current_read(struct cgroup_subsys_state *css,
3917  			       struct cftype *cft)
3918  {
3919  	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3920  
3921  	return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE;
3922  }
3923  
3924  #define OFP_PEAK_UNSET (((-1UL)))
3925  
3926  static int peak_show(struct seq_file *sf, void *v, struct page_counter *pc)
3927  {
3928  	struct cgroup_of_peak *ofp = of_peak(sf->private);
3929  	u64 fd_peak = READ_ONCE(ofp->value), peak;
3930  
3931  	/* User wants global or local peak? */
3932  	if (fd_peak == OFP_PEAK_UNSET)
3933  		peak = pc->watermark;
3934  	else
3935  		peak = max(fd_peak, READ_ONCE(pc->local_watermark));
3936  
3937  	seq_printf(sf, "%llu\n", peak * PAGE_SIZE);
3938  	return 0;
3939  }
3940  
3941  static int memory_peak_show(struct seq_file *sf, void *v)
3942  {
3943  	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(sf));
3944  
3945  	return peak_show(sf, v, &memcg->memory);
3946  }
3947  
3948  static int peak_open(struct kernfs_open_file *of)
3949  {
3950  	struct cgroup_of_peak *ofp = of_peak(of);
3951  
3952  	ofp->value = OFP_PEAK_UNSET;
3953  	return 0;
3954  }
3955  
3956  static void peak_release(struct kernfs_open_file *of)
3957  {
3958  	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3959  	struct cgroup_of_peak *ofp = of_peak(of);
3960  
3961  	if (ofp->value == OFP_PEAK_UNSET) {
3962  		/* fast path (no writes on this fd) */
3963  		return;
3964  	}
3965  	spin_lock(&memcg->peaks_lock);
3966  	list_del(&ofp->list);
3967  	spin_unlock(&memcg->peaks_lock);
3968  }
3969  
3970  static ssize_t peak_write(struct kernfs_open_file *of, char *buf, size_t nbytes,
3971  			  loff_t off, struct page_counter *pc,
3972  			  struct list_head *watchers)
3973  {
3974  	unsigned long usage;
3975  	struct cgroup_of_peak *peer_ctx;
3976  	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3977  	struct cgroup_of_peak *ofp = of_peak(of);
3978  
3979  	spin_lock(&memcg->peaks_lock);
3980  
3981  	usage = page_counter_read(pc);
3982  	WRITE_ONCE(pc->local_watermark, usage);
3983  
3984  	list_for_each_entry(peer_ctx, watchers, list)
3985  		if (usage > peer_ctx->value)
3986  			WRITE_ONCE(peer_ctx->value, usage);
3987  
3988  	/* initial write, register watcher */
3989  	if (ofp->value == OFP_PEAK_UNSET)
3990  		list_add(&ofp->list, watchers);
3991  
3992  	WRITE_ONCE(ofp->value, usage);
3993  	spin_unlock(&memcg->peaks_lock);
3994  
3995  	return nbytes;
3996  }
3997  
3998  static ssize_t memory_peak_write(struct kernfs_open_file *of, char *buf,
3999  				 size_t nbytes, loff_t off)
4000  {
4001  	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4002  
4003  	return peak_write(of, buf, nbytes, off, &memcg->memory,
4004  			  &memcg->memory_peaks);
4005  }
4006  
4007  #undef OFP_PEAK_UNSET
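
/*
 * Editor's note (an inference from the code above, not normative
 * documentation): each open file description of memory.peak tracks its own
 * watermark.  Reading an fd that was never written returns the global peak;
 * writing any string to the fd (the buffer contents are ignored) re-arms a
 * local watermark that subsequent reads on that same fd report.
 */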
4008  
4009  static int memory_min_show(struct seq_file *m, void *v)
4010  {
4011  	return seq_puts_memcg_tunable(m,
4012  		READ_ONCE(mem_cgroup_from_seq(m)->memory.min));
4013  }
4014  
4015  static ssize_t memory_min_write(struct kernfs_open_file *of,
4016  				char *buf, size_t nbytes, loff_t off)
4017  {
4018  	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4019  	unsigned long min;
4020  	int err;
4021  
4022  	buf = strstrip(buf);
4023  	err = page_counter_memparse(buf, "max", &min);
4024  	if (err)
4025  		return err;
4026  
4027  	page_counter_set_min(&memcg->memory, min);
4028  
4029  	return nbytes;
4030  }
4031  
4032  static int memory_low_show(struct seq_file *m, void *v)
4033  {
4034  	return seq_puts_memcg_tunable(m,
4035  		READ_ONCE(mem_cgroup_from_seq(m)->memory.low));
4036  }
4037  
4038  static ssize_t memory_low_write(struct kernfs_open_file *of,
4039  				char *buf, size_t nbytes, loff_t off)
4040  {
4041  	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4042  	unsigned long low;
4043  	int err;
4044  
4045  	buf = strstrip(buf);
4046  	err = page_counter_memparse(buf, "max", &low);
4047  	if (err)
4048  		return err;
4049  
4050  	page_counter_set_low(&memcg->memory, low);
4051  
4052  	return nbytes;
4053  }
4054  
4055  static int memory_high_show(struct seq_file *m, void *v)
4056  {
4057  	return seq_puts_memcg_tunable(m,
4058  		READ_ONCE(mem_cgroup_from_seq(m)->memory.high));
4059  }
4060  
4061  static ssize_t memory_high_write(struct kernfs_open_file *of,
4062  				 char *buf, size_t nbytes, loff_t off)
4063  {
4064  	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4065  	unsigned int nr_retries = MAX_RECLAIM_RETRIES;
4066  	bool drained = false;
4067  	unsigned long high;
4068  	int err;
4069  
4070  	buf = strstrip(buf);
4071  	err = page_counter_memparse(buf, "max", &high);
4072  	if (err)
4073  		return err;
4074  
4075  	page_counter_set_high(&memcg->memory, high);
4076  
4077  	for (;;) {
4078  		unsigned long nr_pages = page_counter_read(&memcg->memory);
4079  		unsigned long reclaimed;
4080  
4081  		if (nr_pages <= high)
4082  			break;
4083  
4084  		if (signal_pending(current))
4085  			break;
4086  
4087  		if (!drained) {
4088  			drain_all_stock(memcg);
4089  			drained = true;
4090  			continue;
4091  		}
4092  
4093  		reclaimed = try_to_free_mem_cgroup_pages(memcg, nr_pages - high,
4094  					GFP_KERNEL, MEMCG_RECLAIM_MAY_SWAP, NULL);
4095  
4096  		if (!reclaimed && !nr_retries--)
4097  			break;
4098  	}
4099  
4100  	memcg_wb_domain_size_changed(memcg);
4101  	return nbytes;
4102  }
4103  
4104  static int memory_max_show(struct seq_file *m, void *v)
4105  {
4106  	return seq_puts_memcg_tunable(m,
4107  		READ_ONCE(mem_cgroup_from_seq(m)->memory.max));
4108  }
4109  
4110  static ssize_t memory_max_write(struct kernfs_open_file *of,
4111  				char *buf, size_t nbytes, loff_t off)
4112  {
4113  	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4114  	unsigned int nr_reclaims = MAX_RECLAIM_RETRIES;
4115  	bool drained = false;
4116  	unsigned long max;
4117  	int err;
4118  
4119  	buf = strstrip(buf);
4120  	err = page_counter_memparse(buf, "max", &max);
4121  	if (err)
4122  		return err;
4123  
4124  	xchg(&memcg->memory.max, max);
4125  
4126  	for (;;) {
4127  		unsigned long nr_pages = page_counter_read(&memcg->memory);
4128  
4129  		if (nr_pages <= max)
4130  			break;
4131  
4132  		if (signal_pending(current))
4133  			break;
4134  
4135  		if (!drained) {
4136  			drain_all_stock(memcg);
4137  			drained = true;
4138  			continue;
4139  		}
4140  
4141  		if (nr_reclaims) {
4142  			if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max,
4143  					GFP_KERNEL, MEMCG_RECLAIM_MAY_SWAP, NULL))
4144  				nr_reclaims--;
4145  			continue;
4146  		}
4147  
4148  		memcg_memory_event(memcg, MEMCG_OOM);
4149  		if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0))
4150  			break;
4151  	}
4152  
4153  	memcg_wb_domain_size_changed(memcg);
4154  	return nbytes;
4155  }
4156  
4157  /*
4158   * Note: don't forget to update the 'samples/cgroup/memcg_event_listener'
4159   * if any new events become available.
4160   */
4161  static void __memory_events_show(struct seq_file *m, atomic_long_t *events)
4162  {
4163  	seq_printf(m, "low %lu\n", atomic_long_read(&events[MEMCG_LOW]));
4164  	seq_printf(m, "high %lu\n", atomic_long_read(&events[MEMCG_HIGH]));
4165  	seq_printf(m, "max %lu\n", atomic_long_read(&events[MEMCG_MAX]));
4166  	seq_printf(m, "oom %lu\n", atomic_long_read(&events[MEMCG_OOM]));
4167  	seq_printf(m, "oom_kill %lu\n",
4168  		   atomic_long_read(&events[MEMCG_OOM_KILL]));
4169  	seq_printf(m, "oom_group_kill %lu\n",
4170  		   atomic_long_read(&events[MEMCG_OOM_GROUP_KILL]));
4171  }
4172  
4173  static int memory_events_show(struct seq_file *m, void *v)
4174  {
4175  	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
4176  
4177  	__memory_events_show(m, memcg->memory_events);
4178  	return 0;
4179  }
4180  
4181  static int memory_events_local_show(struct seq_file *m, void *v)
4182  {
4183  	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
4184  
4185  	__memory_events_show(m, memcg->memory_events_local);
4186  	return 0;
4187  }
4188  
4189  int memory_stat_show(struct seq_file *m, void *v)
4190  {
4191  	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
4192  	char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
4193  	struct seq_buf s;
4194  
4195  	if (!buf)
4196  		return -ENOMEM;
4197  	seq_buf_init(&s, buf, PAGE_SIZE);
4198  	memory_stat_format(memcg, &s);
4199  	seq_puts(m, buf);
4200  	kfree(buf);
4201  	return 0;
4202  }
4203  
4204  #ifdef CONFIG_NUMA
4205  static inline unsigned long lruvec_page_state_output(struct lruvec *lruvec,
4206  						     int item)
4207  {
4208  	return lruvec_page_state(lruvec, item) *
4209  		memcg_page_state_output_unit(item);
4210  }
4211  
4212  static int memory_numa_stat_show(struct seq_file *m, void *v)
4213  {
4214  	int i;
4215  	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
4216  
4217  	mem_cgroup_flush_stats(memcg);
4218  
4219  	for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
4220  		int nid;
4221  
4222  		if (memory_stats[i].idx >= NR_VM_NODE_STAT_ITEMS)
4223  			continue;
4224  
4225  		seq_printf(m, "%s", memory_stats[i].name);
4226  		for_each_node_state(nid, N_MEMORY) {
4227  			u64 size;
4228  			struct lruvec *lruvec;
4229  
4230  			lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
4231  			size = lruvec_page_state_output(lruvec,
4232  							memory_stats[i].idx);
4233  			seq_printf(m, " N%d=%llu", nid, size);
4234  		}
4235  		seq_putc(m, '\n');
4236  	}
4237  
4238  	return 0;
4239  }
4240  #endif
4241  
4242  static int memory_oom_group_show(struct seq_file *m, void *v)
4243  {
4244  	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
4245  
4246  	seq_printf(m, "%d\n", READ_ONCE(memcg->oom_group));
4247  
4248  	return 0;
4249  }
4250  
4251  static ssize_t memory_oom_group_write(struct kernfs_open_file *of,
4252  				      char *buf, size_t nbytes, loff_t off)
4253  {
4254  	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4255  	int ret, oom_group;
4256  
4257  	buf = strstrip(buf);
4258  	if (!buf)
4259  		return -EINVAL;
4260  
4261  	ret = kstrtoint(buf, 0, &oom_group);
4262  	if (ret)
4263  		return ret;
4264  
4265  	if (oom_group != 0 && oom_group != 1)
4266  		return -EINVAL;
4267  
4268  	WRITE_ONCE(memcg->oom_group, oom_group);
4269  
4270  	return nbytes;
4271  }
4272  
4273  enum {
4274  	MEMORY_RECLAIM_SWAPPINESS = 0,
4275  	MEMORY_RECLAIM_NULL,
4276  };
4277  
4278  static const match_table_t tokens = {
4279  	{ MEMORY_RECLAIM_SWAPPINESS, "swappiness=%d"},
4280  	{ MEMORY_RECLAIM_NULL, NULL },
4281  };
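
/*
 * Editor's note, inferred from the parser below rather than from formal
 * documentation: memory.reclaim takes a byte count (memparse suffixes such
 * as K/M/G are accepted), optionally followed by "swappiness=<n>" with n
 * between MIN_SWAPPINESS and MAX_SWAPPINESS.  For example, a proactive
 * reclaim daemon might write "512M swappiness=0" to reclaim half a gigabyte
 * while biasing reclaim away from swap.
 */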
4282  
4283  static ssize_t memory_reclaim(struct kernfs_open_file *of, char *buf,
4284  			      size_t nbytes, loff_t off)
4285  {
4286  	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4287  	unsigned int nr_retries = MAX_RECLAIM_RETRIES;
4288  	unsigned long nr_to_reclaim, nr_reclaimed = 0;
4289  	int swappiness = -1;
4290  	unsigned int reclaim_options;
4291  	char *old_buf, *start;
4292  	substring_t args[MAX_OPT_ARGS];
4293  
4294  	buf = strstrip(buf);
4295  
4296  	old_buf = buf;
4297  	nr_to_reclaim = memparse(buf, &buf) / PAGE_SIZE;
4298  	if (buf == old_buf)
4299  		return -EINVAL;
4300  
4301  	buf = strstrip(buf);
4302  
4303  	while ((start = strsep(&buf, " ")) != NULL) {
4304  		if (!strlen(start))
4305  			continue;
4306  		switch (match_token(start, tokens, args)) {
4307  		case MEMORY_RECLAIM_SWAPPINESS:
4308  			if (match_int(&args[0], &swappiness))
4309  				return -EINVAL;
4310  			if (swappiness < MIN_SWAPPINESS || swappiness > MAX_SWAPPINESS)
4311  				return -EINVAL;
4312  			break;
4313  		default:
4314  			return -EINVAL;
4315  		}
4316  	}
4317  
4318  	reclaim_options	= MEMCG_RECLAIM_MAY_SWAP | MEMCG_RECLAIM_PROACTIVE;
4319  	while (nr_reclaimed < nr_to_reclaim) {
4320  		/* Will converge on zero, but reclaim enforces a minimum */
4321  		unsigned long batch_size = (nr_to_reclaim - nr_reclaimed) / 4;
4322  		unsigned long reclaimed;
4323  
4324  		if (signal_pending(current))
4325  			return -EINTR;
4326  
4327  		/*
4328  		 * This is the final attempt, drain percpu lru caches in the
4329  		 * hope of introducing more evictable pages for
4330  		 * try_to_free_mem_cgroup_pages().
4331  		 */
4332  		if (!nr_retries)
4333  			lru_add_drain_all();
4334  
4335  		reclaimed = try_to_free_mem_cgroup_pages(memcg,
4336  					batch_size, GFP_KERNEL,
4337  					reclaim_options,
4338  					swappiness == -1 ? NULL : &swappiness);
4339  
4340  		if (!reclaimed && !nr_retries--)
4341  			return -EAGAIN;
4342  
4343  		nr_reclaimed += reclaimed;
4344  	}
4345  
4346  	return nbytes;
4347  }
4348  
4349  static struct cftype memory_files[] = {
4350  	{
4351  		.name = "current",
4352  		.flags = CFTYPE_NOT_ON_ROOT,
4353  		.read_u64 = memory_current_read,
4354  	},
4355  	{
4356  		.name = "peak",
4357  		.flags = CFTYPE_NOT_ON_ROOT,
4358  		.open = peak_open,
4359  		.release = peak_release,
4360  		.seq_show = memory_peak_show,
4361  		.write = memory_peak_write,
4362  	},
4363  	{
4364  		.name = "min",
4365  		.flags = CFTYPE_NOT_ON_ROOT,
4366  		.seq_show = memory_min_show,
4367  		.write = memory_min_write,
4368  	},
4369  	{
4370  		.name = "low",
4371  		.flags = CFTYPE_NOT_ON_ROOT,
4372  		.seq_show = memory_low_show,
4373  		.write = memory_low_write,
4374  	},
4375  	{
4376  		.name = "high",
4377  		.flags = CFTYPE_NOT_ON_ROOT,
4378  		.seq_show = memory_high_show,
4379  		.write = memory_high_write,
4380  	},
4381  	{
4382  		.name = "max",
4383  		.flags = CFTYPE_NOT_ON_ROOT,
4384  		.seq_show = memory_max_show,
4385  		.write = memory_max_write,
4386  	},
4387  	{
4388  		.name = "events",
4389  		.flags = CFTYPE_NOT_ON_ROOT,
4390  		.file_offset = offsetof(struct mem_cgroup, events_file),
4391  		.seq_show = memory_events_show,
4392  	},
4393  	{
4394  		.name = "events.local",
4395  		.flags = CFTYPE_NOT_ON_ROOT,
4396  		.file_offset = offsetof(struct mem_cgroup, events_local_file),
4397  		.seq_show = memory_events_local_show,
4398  	},
4399  	{
4400  		.name = "stat",
4401  		.seq_show = memory_stat_show,
4402  	},
4403  #ifdef CONFIG_NUMA
4404  	{
4405  		.name = "numa_stat",
4406  		.seq_show = memory_numa_stat_show,
4407  	},
4408  #endif
4409  	{
4410  		.name = "oom.group",
4411  		.flags = CFTYPE_NOT_ON_ROOT | CFTYPE_NS_DELEGATABLE,
4412  		.seq_show = memory_oom_group_show,
4413  		.write = memory_oom_group_write,
4414  	},
4415  	{
4416  		.name = "reclaim",
4417  		.flags = CFTYPE_NS_DELEGATABLE,
4418  		.write = memory_reclaim,
4419  	},
4420  	{ }	/* terminate */
4421  };
4422  
4423  struct cgroup_subsys memory_cgrp_subsys = {
4424  	.css_alloc = mem_cgroup_css_alloc,
4425  	.css_online = mem_cgroup_css_online,
4426  	.css_offline = mem_cgroup_css_offline,
4427  	.css_released = mem_cgroup_css_released,
4428  	.css_free = mem_cgroup_css_free,
4429  	.css_reset = mem_cgroup_css_reset,
4430  	.css_rstat_flush = mem_cgroup_css_rstat_flush,
4431  	.attach = mem_cgroup_attach,
4432  	.fork = mem_cgroup_fork,
4433  	.exit = mem_cgroup_exit,
4434  	.dfl_cftypes = memory_files,
4435  #ifdef CONFIG_MEMCG_V1
4436  	.can_attach = memcg1_can_attach,
4437  	.cancel_attach = memcg1_cancel_attach,
4438  	.post_attach = memcg1_move_task,
4439  	.legacy_cftypes = mem_cgroup_legacy_files,
4440  #endif
4441  	.early_init = 0,
4442  };
4443  
4444  /**
4445   * mem_cgroup_calculate_protection - check if memory consumption is in the normal range
4446   * @root: the top ancestor of the sub-tree being checked
4447   * @memcg: the memory cgroup to check
4448   *
4449   * WARNING: This function is not stateless! It can only be used as part
4450   *          of a top-down tree iteration, not for isolated queries.
4451   */
4452  void mem_cgroup_calculate_protection(struct mem_cgroup *root,
4453  				     struct mem_cgroup *memcg)
4454  {
4455  	bool recursive_protection =
4456  		cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_RECURSIVE_PROT;
4457  
4458  	if (mem_cgroup_disabled())
4459  		return;
4460  
4461  	if (!root)
4462  		root = root_mem_cgroup;
4463  
4464  	page_counter_calculate_protection(&root->memory, &memcg->memory, recursive_protection);
4465  }
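
/*
 * Editor's sketch (not part of the original file): because the protection
 * calculation is stateful, reclaim consumes it strictly top-down.  The @scan
 * callback is hypothetical and stands in for scanning the memcg's lruvecs;
 * this roughly mirrors the shrink_node_memcgs() pattern in mm/vmscan.c and
 * should be read as an approximation, not the authoritative control flow.
 */
static void __maybe_unused
example_apply_protection(struct mem_cgroup *root,
			 void (*scan)(struct mem_cgroup *memcg))
{
	struct mem_cgroup *memcg = mem_cgroup_iter(root, NULL, NULL);

	do {
		mem_cgroup_calculate_protection(root, memcg);
		if (mem_cgroup_below_min(root, memcg))
			continue;	/* hard-protected, don't reclaim */
		scan(memcg);
	} while ((memcg = mem_cgroup_iter(root, memcg, NULL)));
}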
4466  
4467  static int charge_memcg(struct folio *folio, struct mem_cgroup *memcg,
4468  			gfp_t gfp)
4469  {
4470  	int ret;
4471  
4472  	ret = try_charge(memcg, gfp, folio_nr_pages(folio));
4473  	if (ret)
4474  		goto out;
4475  
4476  	mem_cgroup_commit_charge(folio, memcg);
4477  out:
4478  	return ret;
4479  }
4480  
4481  int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp)
4482  {
4483  	struct mem_cgroup *memcg;
4484  	int ret;
4485  
4486  	memcg = get_mem_cgroup_from_mm(mm);
4487  	ret = charge_memcg(folio, memcg, gfp);
4488  	css_put(&memcg->css);
4489  
4490  	return ret;
4491  }
4492  
4493  /**
4494   * mem_cgroup_hugetlb_try_charge - try to charge the memcg for a hugetlb folio
4495   * @memcg: memcg to charge.
4496   * @gfp: reclaim mode.
4497   * @nr_pages: number of pages to charge.
4498   *
4499   * This function is called when allocating a huge page folio to determine if
4500   * the memcg has the capacity for it. It does not commit the charge yet,
4501   * as the hugetlb folio itself has not been obtained from the hugetlb pool.
4502   *
4503   * Once we have obtained the hugetlb folio, we can call
4504   * mem_cgroup_commit_charge() to commit the charge. If we fail to obtain the
4505   * folio, we should instead call mem_cgroup_cancel_charge() to undo the effect
4506   * of try_charge().
4507   *
4508   * Returns 0 on success. Otherwise, an error code is returned.
4509   */
4510  int mem_cgroup_hugetlb_try_charge(struct mem_cgroup *memcg, gfp_t gfp,
4511  			long nr_pages)
4512  {
4513  	/*
4514  	 * If hugetlb memcg charging is not enabled, do not fail hugetlb allocation,
4515  	 * but do not attempt to commit charge later (or cancel on error) either.
4516  	 */
4517  	if (mem_cgroup_disabled() || !memcg ||
4518  		!cgroup_subsys_on_dfl(memory_cgrp_subsys) ||
4519  		!(cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING))
4520  		return -EOPNOTSUPP;
4521  
4522  	if (try_charge(memcg, gfp, nr_pages))
4523  		return -ENOMEM;
4524  
4525  	return 0;
4526  }
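
/*
 * Editor's sketch of the try/commit/cancel protocol described above; not
 * part of the original file.  The @alloc_from_pool callback stands in for
 * the hugetlb pool allocation and is purely hypothetical.
 */
static struct folio * __maybe_unused
example_hugetlb_charge_flow(struct mem_cgroup *memcg, gfp_t gfp, long nr_pages,
			    struct folio *(*alloc_from_pool)(void))
{
	struct folio *folio;
	int ret = mem_cgroup_hugetlb_try_charge(memcg, gfp, nr_pages);

	if (ret == -ENOMEM)
		return NULL;			/* over the memcg limit */

	folio = alloc_from_pool();
	if (!folio) {
		/* Only undo the charge if one was actually made. */
		if (!ret)
			mem_cgroup_cancel_charge(memcg, nr_pages);
		return NULL;
	}

	/* -EOPNOTSUPP above means "not accounted", so commit only on 0. */
	if (!ret)
		mem_cgroup_commit_charge(folio, memcg);

	return folio;
}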
4527  
4528  /**
4529   * mem_cgroup_swapin_charge_folio - Charge a newly allocated folio for swapin.
4530   * @folio: folio to charge.
4531   * @mm: mm context of the victim
4532   * @gfp: reclaim mode
4533   * @entry: swap entry for which the folio is allocated
4534   *
4535   * This function charges a folio allocated for swapin. Please call this before
4536   * adding the folio to the swapcache.
4537   *
4538   * Returns 0 on success. Otherwise, an error code is returned.
4539   */
4540  int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm,
4541  				  gfp_t gfp, swp_entry_t entry)
4542  {
4543  	struct mem_cgroup *memcg;
4544  	unsigned short id;
4545  	int ret;
4546  
4547  	if (mem_cgroup_disabled())
4548  		return 0;
4549  
4550  	id = lookup_swap_cgroup_id(entry);
4551  	rcu_read_lock();
4552  	memcg = mem_cgroup_from_id(id);
4553  	if (!memcg || !css_tryget_online(&memcg->css))
4554  		memcg = get_mem_cgroup_from_mm(mm);
4555  	rcu_read_unlock();
4556  
4557  	ret = charge_memcg(folio, memcg, gfp);
4558  
4559  	css_put(&memcg->css);
4560  	return ret;
4561  }
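
/*
 * Editor's sketch (not in the original source): the expected ordering around
 * swapin.  The swapcache insertion itself is elided since those helpers are
 * private to mm/swap_state.c; the wrapper name is hypothetical.
 */
static int __maybe_unused example_swapin_charge_order(struct folio *folio,
						      struct mm_struct *mm,
						      gfp_t gfp, swp_entry_t entry)
{
	int err;

	/* 1) Charge the new folio before it becomes visible in the swapcache. */
	err = mem_cgroup_swapin_charge_folio(folio, mm, gfp, entry);
	if (err)
		return err;

	/* 2) ...add the folio to the swapcache here (elided)... */

	/* 3) Drop the duplicate memsw charge still held by the swap slot. */
	mem_cgroup_swapin_uncharge_swap(entry, folio_nr_pages(folio));

	return 0;
}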
4562  
4563  /*
4564   * mem_cgroup_swapin_uncharge_swap - uncharge swap slot
4565   * @entry: the first swap entry for which the pages are charged
4566   * @nr_pages: number of pages which will be uncharged
4567   *
4568   * Call this function after successfully adding the charged page to swapcache.
4569   *
4570   * Note: This function assumes the page for which the swap slot is being
4571   * uncharged is an order-0 page.
4572   */
4573  void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
4574  {
4575  	/*
4576  	 * Cgroup1's unified memory+swap counter has been charged with the
4577  	 * new swapcache page, finish the transfer by uncharging the swap
4578  	 * slot. The swap slot would also get uncharged when it dies, but
4579  	 * it can stick around indefinitely and we'd count the page twice
4580  	 * the entire time.
4581  	 *
4582  	 * Cgroup2 has separate resource counters for memory and swap,
4583  	 * so this is a non-issue here. Memory and swap charge lifetimes
4584  	 * correspond 1:1 to page and swap slot lifetimes: we charge the
4585  	 * page to memory here, and uncharge swap when the slot is freed.
4586  	 */
4587  	if (!mem_cgroup_disabled() && do_memsw_account()) {
4588  		/*
4589  		 * The swap entry might not get freed for a long time,
4590  		 * let's not wait for it.  The page already received a
4591  		 * memory+swap charge, drop the swap entry duplicate.
4592  		 */
4593  		mem_cgroup_uncharge_swap(entry, nr_pages);
4594  	}
4595  }
4596  
4597  struct uncharge_gather {
4598  	struct mem_cgroup *memcg;
4599  	unsigned long nr_memory;
4600  	unsigned long pgpgout;
4601  	unsigned long nr_kmem;
4602  	int nid;
4603  };
4604  
4605  static inline void uncharge_gather_clear(struct uncharge_gather *ug)
4606  {
4607  	memset(ug, 0, sizeof(*ug));
4608  }
4609  
4610  static void uncharge_batch(const struct uncharge_gather *ug)
4611  {
4612  	if (ug->nr_memory) {
4613  		page_counter_uncharge(&ug->memcg->memory, ug->nr_memory);
4614  		if (do_memsw_account())
4615  			page_counter_uncharge(&ug->memcg->memsw, ug->nr_memory);
4616  		if (ug->nr_kmem) {
4617  			mod_memcg_state(ug->memcg, MEMCG_KMEM, -ug->nr_kmem);
4618  			memcg1_account_kmem(ug->memcg, -ug->nr_kmem);
4619  		}
4620  		memcg1_oom_recover(ug->memcg);
4621  	}
4622  
4623  	memcg1_uncharge_batch(ug->memcg, ug->pgpgout, ug->nr_memory, ug->nid);
4624  
4625  	/* drop reference from uncharge_folio */
4626  	css_put(&ug->memcg->css);
4627  }
4628  
4629  static void uncharge_folio(struct folio *folio, struct uncharge_gather *ug)
4630  {
4631  	long nr_pages;
4632  	struct mem_cgroup *memcg;
4633  	struct obj_cgroup *objcg;
4634  
4635  	VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
4636  
4637  	/*
4638  	 * Nobody should be changing or seriously looking at
4639  	 * folio memcg or objcg at this point, we have fully
4640  	 * exclusive access to the folio.
4641  	 */
4642  	if (folio_memcg_kmem(folio)) {
4643  		objcg = __folio_objcg(folio);
4644  		/*
4645  		 * This get matches the put at the end of the function and
4646  		 * kmem pages do not hold memcg references anymore.
4647  		 */
4648  		memcg = get_mem_cgroup_from_objcg(objcg);
4649  	} else {
4650  		memcg = __folio_memcg(folio);
4651  	}
4652  
4653  	if (!memcg)
4654  		return;
4655  
4656  	if (ug->memcg != memcg) {
4657  		if (ug->memcg) {
4658  			uncharge_batch(ug);
4659  			uncharge_gather_clear(ug);
4660  		}
4661  		ug->memcg = memcg;
4662  		ug->nid = folio_nid(folio);
4663  
4664  		/* pairs with css_put in uncharge_batch */
4665  		css_get(&memcg->css);
4666  	}
4667  
4668  	nr_pages = folio_nr_pages(folio);
4669  
4670  	if (folio_memcg_kmem(folio)) {
4671  		ug->nr_memory += nr_pages;
4672  		ug->nr_kmem += nr_pages;
4673  
4674  		folio->memcg_data = 0;
4675  		obj_cgroup_put(objcg);
4676  	} else {
4677  		/* LRU pages aren't accounted at the root level */
4678  		if (!mem_cgroup_is_root(memcg))
4679  			ug->nr_memory += nr_pages;
4680  		ug->pgpgout++;
4681  
4682  		WARN_ON_ONCE(folio_unqueue_deferred_split(folio));
4683  		folio->memcg_data = 0;
4684  	}
4685  
4686  	css_put(&memcg->css);
4687  }
4688  
4689  void __mem_cgroup_uncharge(struct folio *folio)
4690  {
4691  	struct uncharge_gather ug;
4692  
4693  	/* Don't touch folio->lru of any random page, pre-check: */
4694  	if (!folio_memcg_charged(folio))
4695  		return;
4696  
4697  	uncharge_gather_clear(&ug);
4698  	uncharge_folio(folio, &ug);
4699  	uncharge_batch(&ug);
4700  }
4701  
4702  void __mem_cgroup_uncharge_folios(struct folio_batch *folios)
4703  {
4704  	struct uncharge_gather ug;
4705  	unsigned int i;
4706  
4707  	uncharge_gather_clear(&ug);
4708  	for (i = 0; i < folios->nr; i++)
4709  		uncharge_folio(folios->folios[i], &ug);
4710  	if (ug.memcg)
4711  		uncharge_batch(&ug);
4712  }
4713  
4714  /**
4715   * mem_cgroup_replace_folio - Charge a folio's replacement.
4716   * @old: Currently circulating folio.
4717   * @new: Replacement folio.
4718   *
4719   * Charge @new as a replacement folio for @old. @old will
4720   * be uncharged upon free.
4721   *
4722   * Both folios must be locked, @new->mapping must be set up.
4723   */
4724  void mem_cgroup_replace_folio(struct folio *old, struct folio *new)
4725  {
4726  	struct mem_cgroup *memcg;
4727  	long nr_pages = folio_nr_pages(new);
4728  
4729  	VM_BUG_ON_FOLIO(!folio_test_locked(old), old);
4730  	VM_BUG_ON_FOLIO(!folio_test_locked(new), new);
4731  	VM_BUG_ON_FOLIO(folio_test_anon(old) != folio_test_anon(new), new);
4732  	VM_BUG_ON_FOLIO(folio_nr_pages(old) != nr_pages, new);
4733  
4734  	if (mem_cgroup_disabled())
4735  		return;
4736  
4737  	/* Page cache replacement: new folio already charged? */
4738  	if (folio_memcg_charged(new))
4739  		return;
4740  
4741  	memcg = folio_memcg(old);
4742  	VM_WARN_ON_ONCE_FOLIO(!memcg, old);
4743  	if (!memcg)
4744  		return;
4745  
4746  	/* Force-charge the new page. The old one will be freed soon */
4747  	if (!mem_cgroup_is_root(memcg)) {
4748  		page_counter_charge(&memcg->memory, nr_pages);
4749  		if (do_memsw_account())
4750  			page_counter_charge(&memcg->memsw, nr_pages);
4751  	}
4752  
4753  	css_get(&memcg->css);
4754  	commit_charge(new, memcg);
4755  	memcg1_commit_charge(new, memcg);
4756  }
4757  
4758  /**
4759   * mem_cgroup_migrate - Transfer the memcg data from the old to the new folio.
4760   * @old: Currently circulating folio.
4761   * @new: Replacement folio.
4762   *
4763   * Transfer the memcg data from the old folio to the new folio for migration.
4764   * The old folio's data info will be cleared. Note that the memory counters
4765   * will remain unchanged throughout the process.
4766   *
4767   * Both folios must be locked, @new->mapping must be set up.
4768   */
4769  void mem_cgroup_migrate(struct folio *old, struct folio *new)
4770  {
4771  	struct mem_cgroup *memcg;
4772  
4773  	VM_BUG_ON_FOLIO(!folio_test_locked(old), old);
4774  	VM_BUG_ON_FOLIO(!folio_test_locked(new), new);
4775  	VM_BUG_ON_FOLIO(folio_test_anon(old) != folio_test_anon(new), new);
4776  	VM_BUG_ON_FOLIO(folio_nr_pages(old) != folio_nr_pages(new), new);
4777  	VM_BUG_ON_FOLIO(folio_test_lru(old), old);
4778  
4779  	if (mem_cgroup_disabled())
4780  		return;
4781  
4782  	memcg = folio_memcg(old);
4783  	/*
4784  	 * Note that it is normal to see !memcg for a hugetlb folio.
4785  	 * E.g., it could have been allocated when memory_hugetlb_accounting
4786  	 * was not selected.
4787  	 */
4788  	VM_WARN_ON_ONCE_FOLIO(!folio_test_hugetlb(old) && !memcg, old);
4789  	if (!memcg)
4790  		return;
4791  
4792  	/* Transfer the charge and the css ref */
4793  	commit_charge(new, memcg);
4794  
4795  	/* Warning should never happen, so don't worry about refcount non-0 */
4796  	WARN_ON_ONCE(folio_unqueue_deferred_split(old));
4797  	old->memcg_data = 0;
4798  }
4799  
4800  DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
4801  EXPORT_SYMBOL(memcg_sockets_enabled_key);
4802  
4803  void mem_cgroup_sk_alloc(struct sock *sk)
4804  {
4805  	struct mem_cgroup *memcg;
4806  
4807  	if (!mem_cgroup_sockets_enabled)
4808  		return;
4809  
4810  	/* Do not associate the sock with an unrelated interrupted task's memcg. */
4811  	if (!in_task())
4812  		return;
4813  
4814  	rcu_read_lock();
4815  	memcg = mem_cgroup_from_task(current);
4816  	if (mem_cgroup_is_root(memcg))
4817  		goto out;
4818  	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg1_tcpmem_active(memcg))
4819  		goto out;
4820  	if (css_tryget(&memcg->css))
4821  		sk->sk_memcg = memcg;
4822  out:
4823  	rcu_read_unlock();
4824  }
4825  
4826  void mem_cgroup_sk_free(struct sock *sk)
4827  {
4828  	if (sk->sk_memcg)
4829  		css_put(&sk->sk_memcg->css);
4830  }
4831  
4832  /**
4833   * mem_cgroup_charge_skmem - charge socket memory
4834   * @memcg: memcg to charge
4835   * @nr_pages: number of pages to charge
4836   * @gfp_mask: reclaim mode
4837   *
4838   * Charges @nr_pages to @memcg. Returns %true if the charge fit within
4839   * @memcg's configured limit, %false if it doesn't.
4840   */
4841  bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages,
4842  			     gfp_t gfp_mask)
4843  {
4844  	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
4845  		return memcg1_charge_skmem(memcg, nr_pages, gfp_mask);
4846  
4847  	if (try_charge(memcg, gfp_mask, nr_pages) == 0) {
4848  		mod_memcg_state(memcg, MEMCG_SOCK, nr_pages);
4849  		return true;
4850  	}
4851  
4852  	return false;
4853  }
4854  
4855  /**
4856   * mem_cgroup_uncharge_skmem - uncharge socket memory
4857   * @memcg: memcg to uncharge
4858   * @nr_pages: number of pages to uncharge
4859   */
4860  void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
4861  {
4862  	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
4863  		memcg1_uncharge_skmem(memcg, nr_pages);
4864  		return;
4865  	}
4866  
4867  	mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages);
4868  
4869  	refill_stock(memcg, nr_pages);
4870  }
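
/*
 * Editor's sketch (not part of the original file): how a networking path
 * might pair the two helpers above for a socket that was tagged by
 * mem_cgroup_sk_alloc().  The helper names and the GFP_NOWAIT choice are
 * assumptions for illustration only.
 */
static bool __maybe_unused example_account_skmem(struct sock *sk,
						 unsigned int nr_pages)
{
	struct mem_cgroup *memcg = sk->sk_memcg;

	if (!memcg)
		return true;	/* socket is not memcg-accounted */

	return mem_cgroup_charge_skmem(memcg, nr_pages, GFP_NOWAIT);
}

static void __maybe_unused example_unaccount_skmem(struct sock *sk,
						   unsigned int nr_pages)
{
	if (sk->sk_memcg)
		mem_cgroup_uncharge_skmem(sk->sk_memcg, nr_pages);
}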
4871  
4872  static int __init cgroup_memory(char *s)
4873  {
4874  	char *token;
4875  
4876  	while ((token = strsep(&s, ",")) != NULL) {
4877  		if (!*token)
4878  			continue;
4879  		if (!strcmp(token, "nosocket"))
4880  			cgroup_memory_nosocket = true;
4881  		if (!strcmp(token, "nokmem"))
4882  			cgroup_memory_nokmem = true;
4883  		if (!strcmp(token, "nobpf"))
4884  			cgroup_memory_nobpf = true;
4885  	}
4886  	return 1;
4887  }
4888  __setup("cgroup.memory=", cgroup_memory);
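
/*
 * Editor's note: the parser above accepts a comma-separated list on the
 * kernel command line, e.g. "cgroup.memory=nosocket,nokmem" to disable
 * socket and kernel memory accounting while leaving BPF accounting enabled.
 */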
4889  
4890  /*
4891   * subsys_initcall() for memory controller.
4892   *
4893   * Some parts like memcg_hotplug_cpu_dead() have to be initialized from this
4894   * context because of lock dependencies (cgroup_lock -> cpu hotplug) but
4895   * basically everything that doesn't depend on a specific mem_cgroup structure
4896   * should be initialized from here.
4897   */
4898  static int __init mem_cgroup_init(void)
4899  {
4900  	int cpu;
4901  
4902  	/*
4903  	 * Currently s32 type (can refer to struct batched_lruvec_stat) is
4904  	 * Currently an s32 type (see struct batched_lruvec_stat) is used
4905  	 * for per-memcg-per-cpu caching of per-node statistics. For this to
4906  	 * work correctly, the overfill threshold must not exceed
4907  	 * S32_MAX / PAGE_SIZE.
4908  	BUILD_BUG_ON(MEMCG_CHARGE_BATCH > S32_MAX / PAGE_SIZE);
4909  
4910  	cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL,
4911  				  memcg_hotplug_cpu_dead);
4912  
4913  	for_each_possible_cpu(cpu)
4914  		INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
4915  			  drain_local_stock);
4916  
4917  	return 0;
4918  }
4919  subsys_initcall(mem_cgroup_init);
4920  
4921  #ifdef CONFIG_SWAP
4922  static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg)
4923  {
4924  	while (!refcount_inc_not_zero(&memcg->id.ref)) {
4925  		/*
4926  		 * The root cgroup cannot be destroyed, so its refcount must
4927  		 * always be >= 1.
4928  		 */
4929  		if (WARN_ON_ONCE(mem_cgroup_is_root(memcg))) {
4930  			VM_BUG_ON(1);
4931  			break;
4932  		}
4933  		memcg = parent_mem_cgroup(memcg);
4934  		if (!memcg)
4935  			memcg = root_mem_cgroup;
4936  	}
4937  	return memcg;
4938  }
4939  
4940  /**
4941   * mem_cgroup_swapout - transfer a memsw charge to swap
4942   * @folio: folio whose memsw charge to transfer
4943   * @entry: swap entry to move the charge to
4944   *
4945   * Transfer the memsw charge of @folio to @entry.
4946   */
4947  void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry)
4948  {
4949  	struct mem_cgroup *memcg, *swap_memcg;
4950  	unsigned int nr_entries;
4951  	unsigned short oldid;
4952  
4953  	VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
4954  	VM_BUG_ON_FOLIO(folio_ref_count(folio), folio);
4955  
4956  	if (mem_cgroup_disabled())
4957  		return;
4958  
4959  	if (!do_memsw_account())
4960  		return;
4961  
4962  	memcg = folio_memcg(folio);
4963  
4964  	VM_WARN_ON_ONCE_FOLIO(!memcg, folio);
4965  	if (!memcg)
4966  		return;
4967  
4968  	/*
4969  	 * In case the memcg owning these pages has been offlined and doesn't
4970  	 * have an ID allocated to it anymore, charge the closest online
4971  	 * ancestor for the swap instead and transfer the memory+swap charge.
4972  	 */
4973  	swap_memcg = mem_cgroup_id_get_online(memcg);
4974  	nr_entries = folio_nr_pages(folio);
4975  	/* Get references for the tail pages, too */
4976  	if (nr_entries > 1)
4977  		mem_cgroup_id_get_many(swap_memcg, nr_entries - 1);
4978  	oldid = swap_cgroup_record(entry, mem_cgroup_id(swap_memcg),
4979  				   nr_entries);
4980  	VM_BUG_ON_FOLIO(oldid, folio);
4981  	mod_memcg_state(swap_memcg, MEMCG_SWAP, nr_entries);
4982  
4983  	folio_unqueue_deferred_split(folio);
4984  	folio->memcg_data = 0;
4985  
4986  	if (!mem_cgroup_is_root(memcg))
4987  		page_counter_uncharge(&memcg->memory, nr_entries);
4988  
4989  	if (memcg != swap_memcg) {
4990  		if (!mem_cgroup_is_root(swap_memcg))
4991  			page_counter_charge(&swap_memcg->memsw, nr_entries);
4992  		page_counter_uncharge(&memcg->memsw, nr_entries);
4993  	}
4994  
4995  	memcg1_swapout(folio, memcg);
4996  	css_put(&memcg->css);
4997  }
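
/*
 * Worked example, derived from the code above: a 16-page folio is owned
 * by an offlined memcg C whose closest online ancestor is P. The 16 swap
 * entries are recorded against P's id, C->memory is uncharged by 16
 * pages, and the memory+swap charge moves from C to P (P->memsw charged,
 * C->memsw uncharged).
 */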
4998  
4999  /**
5000   * __mem_cgroup_try_charge_swap - try charging swap space for a folio
5001   * @folio: folio being added to swap
5002   * @entry: swap entry to charge
5003   *
5004   * Try to charge @folio's memcg for the swap space at @entry.
5005   *
5006   * Returns 0 on success, -ENOMEM on failure.
5007   */
5008  int __mem_cgroup_try_charge_swap(struct folio *folio, swp_entry_t entry)
5009  {
5010  	unsigned int nr_pages = folio_nr_pages(folio);
5011  	struct page_counter *counter;
5012  	struct mem_cgroup *memcg;
5013  	unsigned short oldid;
5014  
5015  	if (do_memsw_account())
5016  		return 0;
5017  
5018  	memcg = folio_memcg(folio);
5019  
5020  	VM_WARN_ON_ONCE_FOLIO(!memcg, folio);
5021  	if (!memcg)
5022  		return 0;
5023  
5024  	if (!entry.val) {
5025  		memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
5026  		return 0;
5027  	}
5028  
5029  	memcg = mem_cgroup_id_get_online(memcg);
5030  
5031  	if (!mem_cgroup_is_root(memcg) &&
5032  	    !page_counter_try_charge(&memcg->swap, nr_pages, &counter)) {
5033  		memcg_memory_event(memcg, MEMCG_SWAP_MAX);
5034  		memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
5035  		mem_cgroup_id_put(memcg);
5036  		return -ENOMEM;
5037  	}
5038  
5039  	/* Get references for the tail pages, too */
5040  	if (nr_pages > 1)
5041  		mem_cgroup_id_get_many(memcg, nr_pages - 1);
5042  	oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg), nr_pages);
5043  	VM_BUG_ON_FOLIO(oldid, folio);
5044  	mod_memcg_state(memcg, MEMCG_SWAP, nr_pages);
5045  
5046  	return 0;
5047  }
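
/*
 * Sketch of a caller, under the assumption that the swap allocation path
 * goes through the mem_cgroup_try_charge_swap() wrapper and releases the
 * slot on failure (not the actual swap allocator code):
 *
 *	entry = <allocate a swap slot for folio>;
 *	if (mem_cgroup_try_charge_swap(folio, entry)) {
 *		<free the swap slot>;
 *		entry.val = 0;
 *	}
 */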
5048  
5049  /**
5050   * __mem_cgroup_uncharge_swap - uncharge swap space
5051   * @entry: swap entry to uncharge
5052   * @nr_pages: the amount of swap space to uncharge
5053   */
5054  void __mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
5055  {
5056  	struct mem_cgroup *memcg;
5057  	unsigned short id;
5058  
5059  	id = swap_cgroup_record(entry, 0, nr_pages);
5060  	rcu_read_lock();
5061  	memcg = mem_cgroup_from_id(id);
5062  	if (memcg) {
5063  		if (!mem_cgroup_is_root(memcg)) {
5064  			if (do_memsw_account())
5065  				page_counter_uncharge(&memcg->memsw, nr_pages);
5066  			else
5067  				page_counter_uncharge(&memcg->swap, nr_pages);
5068  		}
5069  		mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages);
5070  		mem_cgroup_id_put_many(memcg, nr_pages);
5071  	}
5072  	rcu_read_unlock();
5073  }
5074  
5075  long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
5076  {
5077  	long nr_swap_pages = get_nr_swap_pages();
5078  
5079  	if (mem_cgroup_disabled() || do_memsw_account())
5080  		return nr_swap_pages;
5081  	for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg))
5082  		nr_swap_pages = min_t(long, nr_swap_pages,
5083  				      READ_ONCE(memcg->swap.max) -
5084  				      page_counter_read(&memcg->swap));
5085  	return nr_swap_pages;
5086  }
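
/*
 * Example: with 1000 free swap pages globally, a parent memcg with
 * swap.max = 512 pages and 200 pages of swap in use, and a child with
 * swap.max = "max", the child's result is min(1000, 512 - 200) = 312.
 */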
5087  
5088  bool mem_cgroup_swap_full(struct folio *folio)
5089  {
5090  	struct mem_cgroup *memcg;
5091  
5092  	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
5093  
5094  	if (vm_swap_full())
5095  		return true;
5096  	if (do_memsw_account())
5097  		return false;
5098  
5099  	memcg = folio_memcg(folio);
5100  	if (!memcg)
5101  		return false;
5102  
5103  	for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) {
5104  		unsigned long usage = page_counter_read(&memcg->swap);
5105  
5106  		if (usage * 2 >= READ_ONCE(memcg->swap.high) ||
5107  		    usage * 2 >= READ_ONCE(memcg->swap.max))
5108  			return true;
5109  	}
5110  
5111  	return false;
5112  }
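
/*
 * Example: with memory.swap.max = 1G (262144 pages) and 600M (153600
 * pages) of swap in use at some level of the hierarchy, 153600 * 2 >=
 * 262144 holds, so the folio's swap space is treated as full.
 */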
5113  
5114  static int __init setup_swap_account(char *s)
5115  {
5116  	bool res;
5117  
5118  	if (!kstrtobool(s, &res) && !res)
5119  		pr_warn_once("The swapaccount=0 commandline option is deprecated "
5120  			     "in favor of configuring swap control via cgroupfs. "
5121  			     "Please report your usecase to linux-mm@kvack.org if you "
5122  			     "depend on this functionality.\n");
5123  	return 1;
5124  }
5125  __setup("swapaccount=", setup_swap_account);
5126  
5127  static u64 swap_current_read(struct cgroup_subsys_state *css,
5128  			     struct cftype *cft)
5129  {
5130  	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5131  
5132  	return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE;
5133  }
5134  
5135  static int swap_peak_show(struct seq_file *sf, void *v)
5136  {
5137  	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(sf));
5138  
5139  	return peak_show(sf, v, &memcg->swap);
5140  }
5141  
5142  static ssize_t swap_peak_write(struct kernfs_open_file *of, char *buf,
5143  			       size_t nbytes, loff_t off)
5144  {
5145  	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5146  
5147  	return peak_write(of, buf, nbytes, off, &memcg->swap,
5148  			  &memcg->swap_peaks);
5149  }
5150  
5151  static int swap_high_show(struct seq_file *m, void *v)
5152  {
5153  	return seq_puts_memcg_tunable(m,
5154  		READ_ONCE(mem_cgroup_from_seq(m)->swap.high));
5155  }
5156  
5157  static ssize_t swap_high_write(struct kernfs_open_file *of,
5158  			       char *buf, size_t nbytes, loff_t off)
5159  {
5160  	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5161  	unsigned long high;
5162  	int err;
5163  
5164  	buf = strstrip(buf);
5165  	err = page_counter_memparse(buf, "max", &high);
5166  	if (err)
5167  		return err;
5168  
5169  	page_counter_set_high(&memcg->swap, high);
5170  
5171  	return nbytes;
5172  }
5173  
5174  static int swap_max_show(struct seq_file *m, void *v)
5175  {
5176  	return seq_puts_memcg_tunable(m,
5177  		READ_ONCE(mem_cgroup_from_seq(m)->swap.max));
5178  }
5179  
5180  static ssize_t swap_max_write(struct kernfs_open_file *of,
5181  			      char *buf, size_t nbytes, loff_t off)
5182  {
5183  	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5184  	unsigned long max;
5185  	int err;
5186  
5187  	buf = strstrip(buf);
5188  	err = page_counter_memparse(buf, "max", &max);
5189  	if (err)
5190  		return err;
5191  
5192  	xchg(&memcg->swap.max, max);
5193  
5194  	return nbytes;
5195  }
5196  
5197  static int swap_events_show(struct seq_file *m, void *v)
5198  {
5199  	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
5200  
5201  	seq_printf(m, "high %lu\n",
5202  		   atomic_long_read(&memcg->memory_events[MEMCG_SWAP_HIGH]));
5203  	seq_printf(m, "max %lu\n",
5204  		   atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX]));
5205  	seq_printf(m, "fail %lu\n",
5206  		   atomic_long_read(&memcg->memory_events[MEMCG_SWAP_FAIL]));
5207  
5208  	return 0;
5209  }
5210  
5211  static struct cftype swap_files[] = {
5212  	{
5213  		.name = "swap.current",
5214  		.flags = CFTYPE_NOT_ON_ROOT,
5215  		.read_u64 = swap_current_read,
5216  	},
5217  	{
5218  		.name = "swap.high",
5219  		.flags = CFTYPE_NOT_ON_ROOT,
5220  		.seq_show = swap_high_show,
5221  		.write = swap_high_write,
5222  	},
5223  	{
5224  		.name = "swap.max",
5225  		.flags = CFTYPE_NOT_ON_ROOT,
5226  		.seq_show = swap_max_show,
5227  		.write = swap_max_write,
5228  	},
5229  	{
5230  		.name = "swap.peak",
5231  		.flags = CFTYPE_NOT_ON_ROOT,
5232  		.open = peak_open,
5233  		.release = peak_release,
5234  		.seq_show = swap_peak_show,
5235  		.write = swap_peak_write,
5236  	},
5237  	{
5238  		.name = "swap.events",
5239  		.flags = CFTYPE_NOT_ON_ROOT,
5240  		.file_offset = offsetof(struct mem_cgroup, swap_events_file),
5241  		.seq_show = swap_events_show,
5242  	},
5243  	{ }	/* terminate */
5244  };
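
/*
 * These entries appear as memory.swap.* in a cgroup v2 directory.
 * Illustrative shell usage (paths are examples):
 *
 *	cat /sys/fs/cgroup/foo/memory.swap.current
 *	echo 2G  > /sys/fs/cgroup/foo/memory.swap.max
 *	echo max > /sys/fs/cgroup/foo/memory.swap.high
 *
 * page_counter_memparse() accepts plain byte counts, K/M/G suffixes and
 * the literal "max".
 */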
5245  
5246  #ifdef CONFIG_ZSWAP
5247  /**
5248   * obj_cgroup_may_zswap - check if this cgroup can zswap
5249   * @objcg: the object cgroup
5250   *
5251   * Check if the hierarchical zswap limit has been reached.
5252   *
5253   * This doesn't check for specific headroom, and it is not atomic
5254   * either. But with zswap, the size of the allocation is only known
5255   * once compression has occurred, and this optimistic pre-check avoids
5256   * spending cycles on compression when there is already no room left
5257   * or zswap is disabled altogether somewhere in the hierarchy.
5258   */
5259  bool obj_cgroup_may_zswap(struct obj_cgroup *objcg)
5260  {
5261  	struct mem_cgroup *memcg, *original_memcg;
5262  	bool ret = true;
5263  
5264  	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
5265  		return true;
5266  
5267  	original_memcg = get_mem_cgroup_from_objcg(objcg);
5268  	for (memcg = original_memcg; !mem_cgroup_is_root(memcg);
5269  	     memcg = parent_mem_cgroup(memcg)) {
5270  		unsigned long max = READ_ONCE(memcg->zswap_max);
5271  		unsigned long pages;
5272  
5273  		if (max == PAGE_COUNTER_MAX)
5274  			continue;
5275  		if (max == 0) {
5276  			ret = false;
5277  			break;
5278  		}
5279  
5280  		/*
5281  		 * mem_cgroup_flush_stats() ignores small changes. Use
5282  		 * do_flush_stats() directly to get accurate stats for charging.
5283  		 */
5284  		do_flush_stats(memcg);
5285  		pages = memcg_page_state(memcg, MEMCG_ZSWAP_B) / PAGE_SIZE;
5286  		if (pages < max)
5287  			continue;
5288  		ret = false;
5289  		break;
5290  	}
5291  	mem_cgroup_put(original_memcg);
5292  	return ret;
5293  }
5294  
5295  /**
5296   * obj_cgroup_charge_zswap - charge compression backend memory
5297   * @objcg: the object cgroup
5298   * @size: size of compressed object
5299   *
5300   * This forces the charge after obj_cgroup_may_zswap() allowed
5301   * compression and storage in zswap for this cgroup to go ahead.
5302   */
5303  void obj_cgroup_charge_zswap(struct obj_cgroup *objcg, size_t size)
5304  {
5305  	struct mem_cgroup *memcg;
5306  
5307  	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
5308  		return;
5309  
5310  	VM_WARN_ON_ONCE(!(current->flags & PF_MEMALLOC));
5311  
5312  	/* PF_MEMALLOC context, charging must succeed */
5313  	if (obj_cgroup_charge(objcg, GFP_KERNEL, size))
5314  		VM_WARN_ON_ONCE(1);
5315  
5316  	rcu_read_lock();
5317  	memcg = obj_cgroup_memcg(objcg);
5318  	mod_memcg_state(memcg, MEMCG_ZSWAP_B, size);
5319  	mod_memcg_state(memcg, MEMCG_ZSWAPPED, 1);
5320  	rcu_read_unlock();
5321  }
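
/*
 * Sketch of how a zswap store path would combine the optimistic
 * pre-check with the post-compression charge (not the actual mm/zswap.c
 * code; the real caller runs under PF_MEMALLOC, which is why the charge
 * is expected to succeed):
 *
 *	if (objcg && !obj_cgroup_may_zswap(objcg))
 *		goto reject;
 *	<compress the page, learn compressed_size>
 *	if (objcg)
 *		obj_cgroup_charge_zswap(objcg, compressed_size);
 */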
5322  
5323  /**
5324   * obj_cgroup_uncharge_zswap - uncharge compression backend memory
5325   * @objcg: the object cgroup
5326   * @size: size of compressed object
5327   *
5328   * Uncharges zswap memory on page in.
5329   */
5330  void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg, size_t size)
5331  {
5332  	struct mem_cgroup *memcg;
5333  
5334  	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
5335  		return;
5336  
5337  	obj_cgroup_uncharge(objcg, size);
5338  
5339  	rcu_read_lock();
5340  	memcg = obj_cgroup_memcg(objcg);
5341  	mod_memcg_state(memcg, MEMCG_ZSWAP_B, -size);
5342  	mod_memcg_state(memcg, MEMCG_ZSWAPPED, -1);
5343  	rcu_read_unlock();
5344  }
5345  
5346  bool mem_cgroup_zswap_writeback_enabled(struct mem_cgroup *memcg)
5347  {
5348  	/* if zswap is disabled, do not block pages going to the swapping device */
5349  	if (!zswap_is_enabled())
5350  		return true;
5351  
5352  	for (; memcg; memcg = parent_mem_cgroup(memcg))
5353  		if (!READ_ONCE(memcg->zswap_writeback))
5354  			return false;
5355  
5356  	return true;
5357  }
5358  
5359  static u64 zswap_current_read(struct cgroup_subsys_state *css,
5360  			      struct cftype *cft)
5361  {
5362  	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5363  
5364  	mem_cgroup_flush_stats(memcg);
5365  	return memcg_page_state(memcg, MEMCG_ZSWAP_B);
5366  }
5367  
5368  static int zswap_max_show(struct seq_file *m, void *v)
5369  {
5370  	return seq_puts_memcg_tunable(m,
5371  		READ_ONCE(mem_cgroup_from_seq(m)->zswap_max));
5372  }
5373  
5374  static ssize_t zswap_max_write(struct kernfs_open_file *of,
5375  			       char *buf, size_t nbytes, loff_t off)
5376  {
5377  	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5378  	unsigned long max;
5379  	int err;
5380  
5381  	buf = strstrip(buf);
5382  	err = page_counter_memparse(buf, "max", &max);
5383  	if (err)
5384  		return err;
5385  
5386  	xchg(&memcg->zswap_max, max);
5387  
5388  	return nbytes;
5389  }
5390  
5391  static int zswap_writeback_show(struct seq_file *m, void *v)
5392  {
5393  	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
5394  
5395  	seq_printf(m, "%d\n", READ_ONCE(memcg->zswap_writeback));
5396  	return 0;
5397  }
5398  
5399  static ssize_t zswap_writeback_write(struct kernfs_open_file *of,
5400  				char *buf, size_t nbytes, loff_t off)
5401  {
5402  	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5403  	int zswap_writeback;
5404  	ssize_t parse_ret = kstrtoint(strstrip(buf), 0, &zswap_writeback);
5405  
5406  	if (parse_ret)
5407  		return parse_ret;
5408  
5409  	if (zswap_writeback != 0 && zswap_writeback != 1)
5410  		return -EINVAL;
5411  
5412  	WRITE_ONCE(memcg->zswap_writeback, zswap_writeback);
5413  	return nbytes;
5414  }
5415  
5416  static struct cftype zswap_files[] = {
5417  	{
5418  		.name = "zswap.current",
5419  		.flags = CFTYPE_NOT_ON_ROOT,
5420  		.read_u64 = zswap_current_read,
5421  	},
5422  	{
5423  		.name = "zswap.max",
5424  		.flags = CFTYPE_NOT_ON_ROOT,
5425  		.seq_show = zswap_max_show,
5426  		.write = zswap_max_write,
5427  	},
5428  	{
5429  		.name = "zswap.writeback",
5430  		.seq_show = zswap_writeback_show,
5431  		.write = zswap_writeback_write,
5432  	},
5433  	{ }	/* terminate */
5434  };
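
/*
 * Illustrative shell usage for the zswap knobs (paths are examples):
 *
 *	echo 512M > /sys/fs/cgroup/foo/memory.zswap.max
 *	echo 0    > /sys/fs/cgroup/foo/memory.zswap.writeback
 *
 * memory.zswap.writeback accepts only 0 or 1; memory.zswap.max accepts
 * byte values or "max", like the other limits.
 */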
5435  #endif /* CONFIG_ZSWAP */
5436  
5437  static int __init mem_cgroup_swap_init(void)
5438  {
5439  	if (mem_cgroup_disabled())
5440  		return 0;
5441  
5442  	WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, swap_files));
5443  #ifdef CONFIG_MEMCG_V1
5444  	WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys, memsw_files));
5445  #endif
5446  #ifdef CONFIG_ZSWAP
5447  	WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, zswap_files));
5448  #endif
5449  	return 0;
5450  }
5451  subsys_initcall(mem_cgroup_swap_init);
5452  
5453  #endif /* CONFIG_SWAP */
5454