/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PAGE_REF_H
#define _LINUX_PAGE_REF_H

#include <linux/atomic.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/tracepoint-defs.h>

DECLARE_TRACEPOINT(page_ref_set);
DECLARE_TRACEPOINT(page_ref_mod);
DECLARE_TRACEPOINT(page_ref_mod_and_test);
DECLARE_TRACEPOINT(page_ref_mod_and_return);
DECLARE_TRACEPOINT(page_ref_mod_unless);
DECLARE_TRACEPOINT(page_ref_freeze);
DECLARE_TRACEPOINT(page_ref_unfreeze);

#ifdef CONFIG_DEBUG_PAGE_REF

/*
 * Ideally we would want to use the trace_<tracepoint>_enabled() helper
 * functions, but the header dependencies of <linux/tracepoint.h> make
 * that infeasible here.  Instead we use tracepoint_enabled() from
 * <linux/tracepoint-defs.h>, which open codes the same static-key check.
 *
 * See trace_##name##_enabled(void) in include/linux/tracepoint.h
 */
#define page_ref_tracepoint_active(t) tracepoint_enabled(t)

extern void __page_ref_set(struct page *page, int v);
extern void __page_ref_mod(struct page *page, int v);
extern void __page_ref_mod_and_test(struct page *page, int v, int ret);
extern void __page_ref_mod_and_return(struct page *page, int v, int ret);
extern void __page_ref_mod_unless(struct page *page, int v, int u);
extern void __page_ref_freeze(struct page *page, int v, int ret);
extern void __page_ref_unfreeze(struct page *page, int v);

#else

#define page_ref_tracepoint_active(t) false

static inline void __page_ref_set(struct page *page, int v)
{
}
static inline void __page_ref_mod(struct page *page, int v)
{
}
static inline void __page_ref_mod_and_test(struct page *page, int v, int ret)
{
}
static inline void __page_ref_mod_and_return(struct page *page, int v, int ret)
{
}
static inline void __page_ref_mod_unless(struct page *page, int v, int u)
{
}
static inline void __page_ref_freeze(struct page *page, int v, int ret)
{
}
static inline void __page_ref_unfreeze(struct page *page, int v)
{
}

#endif

static inline int page_ref_count(const struct page *page)
{
	return atomic_read(&page->_refcount);
}

/**
 * folio_ref_count - The reference count on this folio.
 * @folio: The folio.
 *
 * The refcount is usually incremented by calls to folio_get() and
 * decremented by calls to folio_put().  Some typical users of the
 * folio refcount:
 *
 * - Each reference from a page table
 * - The page cache
 * - Filesystem private data
 * - The LRU list
 * - Pipes
 * - Direct IO which references this page in the process address space
 *
 * Return: The number of references to this folio.
 */
static inline int folio_ref_count(const struct folio *folio)
{
	return page_ref_count(&folio->page);
}

static inline int page_count(const struct page *page)
{
	return folio_ref_count(page_folio(page));
}
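
/*
 * Usage sketch (illustrative only, not part of the mainline header): a
 * caller that already holds a reference can use folio_ref_count() for a
 * cheap sanity check, since its own reference keeps the count non-zero.
 * example_assert_folio_referenced() is a hypothetical helper.
 */
static inline void example_assert_folio_referenced(struct folio *folio)
{
	VM_BUG_ON_FOLIO(folio_ref_count(folio) <= 0, folio);
}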

static inline void set_page_count(struct page *page, int v)
{
	atomic_set(&page->_refcount, v);
	if (page_ref_tracepoint_active(page_ref_set))
		__page_ref_set(page, v);
}

static inline void folio_set_count(struct folio *folio, int v)
{
	set_page_count(&folio->page, v);
}

/*
 * Set up the page count before the page is freed into the page
 * allocator for the first time (boot or memory hotplug).
 */
static inline void init_page_count(struct page *page)
{
	set_page_count(page, 1);
}

static inline void page_ref_add(struct page *page, int nr)
{
	atomic_add(nr, &page->_refcount);
	if (page_ref_tracepoint_active(page_ref_mod))
		__page_ref_mod(page, nr);
}

static inline void folio_ref_add(struct folio *folio, int nr)
{
	page_ref_add(&folio->page, nr);
}

static inline void page_ref_sub(struct page *page, int nr)
{
	atomic_sub(nr, &page->_refcount);
	if (page_ref_tracepoint_active(page_ref_mod))
		__page_ref_mod(page, -nr);
}

static inline void folio_ref_sub(struct folio *folio, int nr)
{
	page_ref_sub(&folio->page, nr);
}
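
/*
 * Usage sketch (illustrative only, not part of the mainline header):
 * batched reference counting, as a get_user_pages()-style pinner might
 * do.  One add covers nr uses and one sub releases them all, which is
 * cheaper than nr separate inc/dec pairs.  example_batch_folio_refs()
 * is a hypothetical helper and assumes the caller already holds a
 * reference of its own.
 */
static inline void example_batch_folio_refs(struct folio *folio, int nr)
{
	folio_ref_add(folio, nr);	/* grant nr additional uses */
	/* ... hand the folio out nr times ... */
	folio_ref_sub(folio, nr);	/* all nr uses have been returned */
}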

static inline int folio_ref_sub_return(struct folio *folio, int nr)
{
	int ret = atomic_sub_return(nr, &folio->_refcount);

	if (page_ref_tracepoint_active(page_ref_mod_and_return))
		__page_ref_mod_and_return(&folio->page, -nr, ret);
	return ret;
}

static inline void page_ref_inc(struct page *page)
{
	atomic_inc(&page->_refcount);
	if (page_ref_tracepoint_active(page_ref_mod))
		__page_ref_mod(page, 1);
}

static inline void folio_ref_inc(struct folio *folio)
{
	page_ref_inc(&folio->page);
}

static inline void page_ref_dec(struct page *page)
{
	atomic_dec(&page->_refcount);
	if (page_ref_tracepoint_active(page_ref_mod))
		__page_ref_mod(page, -1);
}

static inline void folio_ref_dec(struct folio *folio)
{
	page_ref_dec(&folio->page);
}

static inline int page_ref_sub_and_test(struct page *page, int nr)
{
	int ret = atomic_sub_and_test(nr, &page->_refcount);

	if (page_ref_tracepoint_active(page_ref_mod_and_test))
		__page_ref_mod_and_test(page, -nr, ret);
	return ret;
}

static inline int folio_ref_sub_and_test(struct folio *folio, int nr)
{
	return page_ref_sub_and_test(&folio->page, nr);
}

static inline int page_ref_inc_return(struct page *page)
{
	int ret = atomic_inc_return(&page->_refcount);

	if (page_ref_tracepoint_active(page_ref_mod_and_return))
		__page_ref_mod_and_return(page, 1, ret);
	return ret;
}

static inline int folio_ref_inc_return(struct folio *folio)
{
	return page_ref_inc_return(&folio->page);
}

static inline int page_ref_dec_and_test(struct page *page)
{
	int ret = atomic_dec_and_test(&page->_refcount);

	if (page_ref_tracepoint_active(page_ref_mod_and_test))
		__page_ref_mod_and_test(page, -1, ret);
	return ret;
}

static inline int folio_ref_dec_and_test(struct folio *folio)
{
	return page_ref_dec_and_test(&folio->page);
}
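
/*
 * Usage sketch (illustrative only, not part of the mainline header):
 * the *_dec_and_test() helpers are the building block of "put"
 * operations.  Whichever caller drops the last reference sees a true
 * return and becomes responsible for freeing.  example_put_folio() is
 * a hypothetical helper; the real put path is folio_put() in
 * <linux/mm.h>.
 */
static inline void example_put_folio(struct folio *folio)
{
	if (folio_ref_dec_and_test(folio)) {
		/* Last reference gone: a real put would free the folio here. */
	}
}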

static inline int page_ref_dec_return(struct page *page)
{
	int ret = atomic_dec_return(&page->_refcount);

	if (page_ref_tracepoint_active(page_ref_mod_and_return))
		__page_ref_mod_and_return(page, -1, ret);
	return ret;
}

static inline int folio_ref_dec_return(struct folio *folio)
{
	return page_ref_dec_return(&folio->page);
}

static inline bool page_ref_add_unless(struct page *page, int nr, int u)
{
	bool ret = false;

	rcu_read_lock();
	/* avoid writing to the vmemmap area being remapped */
	if (!page_is_fake_head(page) && page_ref_count(page) != u)
		ret = atomic_add_unless(&page->_refcount, nr, u);
	rcu_read_unlock();

	if (page_ref_tracepoint_active(page_ref_mod_unless))
		__page_ref_mod_unless(page, nr, ret);
	return ret;
}

static inline bool folio_ref_add_unless(struct folio *folio, int nr, int u)
{
	return page_ref_add_unless(&folio->page, nr, u);
}

/**
 * folio_try_get - Attempt to increase the refcount on a folio.
 * @folio: The folio.
 *
 * If you do not already have a reference to a folio, you can attempt to
 * get one using this function.  It may fail if, for example, the folio
 * has been freed since you found a pointer to it, or it is frozen for
 * the purposes of splitting or migration.
 *
 * Return: True if the reference count was successfully incremented.
 */
static inline bool folio_try_get(struct folio *folio)
{
	return folio_ref_add_unless(folio, 1, 0);
}
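
/*
 * Usage sketch (illustrative only, not part of the mainline header):
 * the classic lockless-lookup pattern.  A folio pointer found under
 * RCU protection may be concurrently freed, so the reference must be
 * taken with folio_try_get() and the lookup revalidated by the caller
 * afterwards.  example_pin_folio() is a hypothetical helper.
 */
static inline bool example_pin_folio(struct folio *folio)
{
	if (!folio_try_get(folio))
		return false;	/* freed or frozen; the caller retries */
	/* ... the folio is safe to use until the reference is dropped ... */
	folio_ref_dec(folio);
	return true;
}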

static inline bool folio_ref_try_add(struct folio *folio, int count)
{
	return folio_ref_add_unless(folio, count, 0);
}

static inline int page_ref_freeze(struct page *page, int count)
{
	int ret = likely(atomic_cmpxchg(&page->_refcount, count, 0) == count);

	if (page_ref_tracepoint_active(page_ref_freeze))
		__page_ref_freeze(page, count, ret);
	return ret;
}

static inline int folio_ref_freeze(struct folio *folio, int count)
{
	return page_ref_freeze(&folio->page, count);
}

static inline void page_ref_unfreeze(struct page *page, int count)
{
	VM_BUG_ON_PAGE(page_count(page) != 0, page);
	VM_BUG_ON(count == 0);

	atomic_set_release(&page->_refcount, count);
	if (page_ref_tracepoint_active(page_ref_unfreeze))
		__page_ref_unfreeze(page, count);
}

static inline void folio_ref_unfreeze(struct folio *folio, int count)
{
	page_ref_unfreeze(&folio->page, count);
}
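
/*
 * Usage sketch (illustrative only, not part of the mainline header):
 * the freeze/unfreeze pair is how code such as folio splitting and
 * migration takes exclusive ownership.  Freezing succeeds only if the
 * caller can account for every existing reference; while frozen
 * (refcount == 0), folio_try_get() fails, so no new references can
 * appear.  example_with_exclusive_access() is a hypothetical helper.
 */
static inline bool example_with_exclusive_access(struct folio *folio,
						 int expected_refs)
{
	if (!folio_ref_freeze(folio, expected_refs))
		return false;	/* an unexpected reference exists */
	/* ... operate on the folio with no concurrent users ... */
	folio_ref_unfreeze(folio, expected_refs);
	return true;
}
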
#endif