/* SPDX-License-Identifier: GPL-2.0-or-later */
/* Internal definitions for network filesystem support
 *
 * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/folio_queue.h>
#include <linux/netfs.h>
#include <linux/fscache.h>
#include <linux/fscache-cache.h>
#include <trace/events/netfs.h>
#include <trace/events/fscache.h>

#ifdef pr_fmt
#undef pr_fmt
#endif

#define pr_fmt(fmt) "netfs: " fmt

/*
 * buffered_read.c
 */
int netfs_prefetch_for_write(struct file *file, struct folio *folio,
			     size_t offset, size_t len);

/*
 * main.c
 */
extern unsigned int netfs_debug;
extern struct list_head netfs_io_requests;
extern spinlock_t netfs_proc_lock;
extern mempool_t netfs_request_pool;
extern mempool_t netfs_subrequest_pool;

#ifdef CONFIG_PROC_FS
static inline void netfs_proc_add_rreq(struct netfs_io_request *rreq)
{
	spin_lock(&netfs_proc_lock);
	list_add_tail_rcu(&rreq->proc_link, &netfs_io_requests);
	spin_unlock(&netfs_proc_lock);
}
static inline void netfs_proc_del_rreq(struct netfs_io_request *rreq)
{
	if (!list_empty(&rreq->proc_link)) {
		spin_lock(&netfs_proc_lock);
		list_del_rcu(&rreq->proc_link);
		spin_unlock(&netfs_proc_lock);
	}
}
#else
static inline void netfs_proc_add_rreq(struct netfs_io_request *rreq) {}
static inline void netfs_proc_del_rreq(struct netfs_io_request *rreq) {}
#endif
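
/*
 * Illustrative sketch only: these helpers are paired around a request's
 * lifetime so it shows up in /proc/fs/netfs/requests while it exists,
 * roughly as below (in practice the allocation and freeing paths in
 * objects.c do this):
 *
 *	netfs_proc_add_rreq(rreq);	// after the request is initialised
 *	...
 *	netfs_proc_del_rreq(rreq);	// before the request is freed
 */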

/*
 * misc.c
 */
struct folio_queue *netfs_buffer_make_space(struct netfs_io_request *rreq);
int netfs_buffer_append_folio(struct netfs_io_request *rreq, struct folio *folio,
			      bool needs_put);
struct folio_queue *netfs_delete_buffer_head(struct netfs_io_request *wreq);
void netfs_clear_buffer(struct netfs_io_request *rreq);
void netfs_reset_iter(struct netfs_io_subrequest *subreq);

/*
 * objects.c
 */
struct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
					     struct file *file,
					     loff_t start, size_t len,
					     enum netfs_io_origin origin);
void netfs_get_request(struct netfs_io_request *rreq, enum netfs_rreq_ref_trace what);
void netfs_clear_subrequests(struct netfs_io_request *rreq, bool was_async);
void netfs_put_request(struct netfs_io_request *rreq, bool was_async,
		       enum netfs_rreq_ref_trace what);
struct netfs_io_subrequest *netfs_alloc_subrequest(struct netfs_io_request *rreq);

static inline void netfs_see_request(struct netfs_io_request *rreq,
				     enum netfs_rreq_ref_trace what)
{
	trace_netfs_rreq_ref(rreq->debug_id, refcount_read(&rreq->ref), what);
}

/*
 * read_collect.c
 */
void netfs_read_termination_worker(struct work_struct *work);
void netfs_rreq_terminated(struct netfs_io_request *rreq, bool was_async);

/*
 * read_pgpriv2.c
 */
void netfs_pgpriv2_mark_copy_to_cache(struct netfs_io_subrequest *subreq,
				      struct netfs_io_request *rreq,
				      struct folio_queue *folioq,
				      int slot);
void netfs_pgpriv2_write_to_the_cache(struct netfs_io_request *rreq);
bool netfs_pgpriv2_unlock_copied_folios(struct netfs_io_request *wreq);

/*
 * read_retry.c
 */
void netfs_retry_reads(struct netfs_io_request *rreq);
void netfs_unlock_abandoned_read_pages(struct netfs_io_request *rreq);

/*
 * stats.c
 */
#ifdef CONFIG_NETFS_STATS
extern atomic_t netfs_n_rh_dio_read;
extern atomic_t netfs_n_rh_readahead;
extern atomic_t netfs_n_rh_read_folio;
extern atomic_t netfs_n_rh_rreq;
extern atomic_t netfs_n_rh_sreq;
extern atomic_t netfs_n_rh_download;
extern atomic_t netfs_n_rh_download_done;
extern atomic_t netfs_n_rh_download_failed;
extern atomic_t netfs_n_rh_download_instead;
extern atomic_t netfs_n_rh_read;
extern atomic_t netfs_n_rh_read_done;
extern atomic_t netfs_n_rh_read_failed;
extern atomic_t netfs_n_rh_zero;
extern atomic_t netfs_n_rh_short_read;
extern atomic_t netfs_n_rh_write;
extern atomic_t netfs_n_rh_write_begin;
extern atomic_t netfs_n_rh_write_done;
extern atomic_t netfs_n_rh_write_failed;
extern atomic_t netfs_n_rh_write_zskip;
extern atomic_t netfs_n_wh_buffered_write;
extern atomic_t netfs_n_wh_writethrough;
extern atomic_t netfs_n_wh_dio_write;
extern atomic_t netfs_n_wh_writepages;
extern atomic_t netfs_n_wh_copy_to_cache;
extern atomic_t netfs_n_wh_wstream_conflict;
extern atomic_t netfs_n_wh_upload;
extern atomic_t netfs_n_wh_upload_done;
extern atomic_t netfs_n_wh_upload_failed;
extern atomic_t netfs_n_wh_write;
extern atomic_t netfs_n_wh_write_done;
extern atomic_t netfs_n_wh_write_failed;
extern atomic_t netfs_n_wb_lock_skip;
extern atomic_t netfs_n_wb_lock_wait;
extern atomic_t netfs_n_folioq;

int netfs_stats_show(struct seq_file *m, void *v);

static inline void netfs_stat(atomic_t *stat)
{
	atomic_inc(stat);
}

static inline void netfs_stat_d(atomic_t *stat)
{
	atomic_dec(stat);
}

#else
#define netfs_stat(x) do {} while (0)
#define netfs_stat_d(x) do {} while (0)
#endif
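
/*
 * Illustrative sketch only: callers bump these counters unconditionally,
 * and the empty macro definitions above compile the calls away when
 * CONFIG_NETFS_STATS is disabled, e.g.:
 *
 *	netfs_stat(&netfs_n_rh_readahead);	// count a readahead request
 */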

/*
 * write_collect.c
 */
int netfs_folio_written_back(struct folio *folio);
void netfs_write_collection_worker(struct work_struct *work);
void netfs_wake_write_collector(struct netfs_io_request *wreq, bool was_async);

/*
 * write_issue.c
 */
struct netfs_io_request *netfs_create_write_req(struct address_space *mapping,
						struct file *file,
						loff_t start,
						enum netfs_io_origin origin);
void netfs_reissue_write(struct netfs_io_stream *stream,
			 struct netfs_io_subrequest *subreq,
			 struct iov_iter *source);
void netfs_issue_write(struct netfs_io_request *wreq,
		       struct netfs_io_stream *stream);
int netfs_advance_write(struct netfs_io_request *wreq,
			struct netfs_io_stream *stream,
			loff_t start, size_t len, bool to_eof);
struct netfs_io_request *netfs_begin_writethrough(struct kiocb *iocb, size_t len);
int netfs_advance_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc,
			       struct folio *folio, size_t copied, bool to_page_end,
			       struct folio **writethrough_cache);
int netfs_end_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc,
			   struct folio *writethrough_cache);
int netfs_unbuffered_write(struct netfs_io_request *wreq, bool may_wait, size_t len);

/*
 * Miscellaneous functions.
 */
static inline bool netfs_is_cache_enabled(struct netfs_inode *ctx)
{
#if IS_ENABLED(CONFIG_FSCACHE)
	struct fscache_cookie *cookie = ctx->cache;

	return fscache_cookie_valid(cookie) && cookie->cache_priv &&
		fscache_cookie_enabled(cookie);
#else
	return false;
#endif
}

/*
 * Get a ref on a netfs group attached to a dirty page (e.g. a ceph snap).
 */
static inline struct netfs_group *netfs_get_group(struct netfs_group *netfs_group)
{
	if (netfs_group && netfs_group != NETFS_FOLIO_COPY_TO_CACHE)
		refcount_inc(&netfs_group->ref);
	return netfs_group;
}

/*
 * Dispose of a netfs group attached to a dirty page (e.g. a ceph snap).
 */
static inline void netfs_put_group(struct netfs_group *netfs_group)
{
	if (netfs_group &&
	    netfs_group != NETFS_FOLIO_COPY_TO_CACHE &&
	    refcount_dec_and_test(&netfs_group->ref))
		netfs_group->free(netfs_group);
}

/*
 * Dispose of multiple refs on a netfs group attached to dirty pages (e.g. a
 * ceph snap).
 */
static inline void netfs_put_group_many(struct netfs_group *netfs_group, int nr)
{
	if (netfs_group &&
	    netfs_group != NETFS_FOLIO_COPY_TO_CACHE &&
	    refcount_sub_and_test(nr, &netfs_group->ref))
		netfs_group->free(netfs_group);
}
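
/*
 * Illustrative sketch only: a filesystem pinning a group on a dirty folio
 * and later dropping it might pair the helpers roughly like this (the
 * attach/detach pairing shown is hypothetical):
 *
 *	folio_attach_private(folio, netfs_get_group(group));
 *	...
 *	netfs_put_group(netfs_folio_group(folio));
 *
 * Note that both helpers deliberately skip refcounting for the
 * NETFS_FOLIO_COPY_TO_CACHE sentinel, so it can be passed through
 * unconditionally.
 */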

/*
 * fscache-cache.c
 */
#ifdef CONFIG_PROC_FS
extern const struct seq_operations fscache_caches_seq_ops;
#endif
bool fscache_begin_cache_access(struct fscache_cache *cache, enum fscache_access_trace why);
void fscache_end_cache_access(struct fscache_cache *cache, enum fscache_access_trace why);
struct fscache_cache *fscache_lookup_cache(const char *name, bool is_cache);
void fscache_put_cache(struct fscache_cache *cache, enum fscache_cache_trace where);

static inline enum fscache_cache_state fscache_cache_state(const struct fscache_cache *cache)
{
	return smp_load_acquire(&cache->state);
}

static inline bool fscache_cache_is_live(const struct fscache_cache *cache)
{
	return fscache_cache_state(cache) == FSCACHE_CACHE_IS_ACTIVE;
}

static inline void fscache_set_cache_state(struct fscache_cache *cache,
					   enum fscache_cache_state new_state)
{
	smp_store_release(&cache->state, new_state);
}

static inline bool fscache_set_cache_state_maybe(struct fscache_cache *cache,
						 enum fscache_cache_state old_state,
						 enum fscache_cache_state new_state)
{
	return try_cmpxchg_release(&cache->state, &old_state, new_state);
}
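
/*
 * Illustrative sketch only: the acquire load in fscache_cache_state() pairs
 * with the release store in fscache_set_cache_state(), so a reader that
 * observes FSCACHE_CACHE_IS_ACTIVE also observes everything the cache
 * backend published before going live.  A typical check might look like:
 *
 *	if (!fscache_cache_is_live(cache))
 *		return -ENOBUFS;	// hypothetical error path
 */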

/*
 * fscache-cookie.c
 */
extern struct kmem_cache *fscache_cookie_jar;
#ifdef CONFIG_PROC_FS
extern const struct seq_operations fscache_cookies_seq_ops;
#endif
extern struct timer_list fscache_cookie_lru_timer;

extern void fscache_print_cookie(struct fscache_cookie *cookie, char prefix);
extern bool fscache_begin_cookie_access(struct fscache_cookie *cookie,
					enum fscache_access_trace why);

static inline void fscache_see_cookie(struct fscache_cookie *cookie,
				      enum fscache_cookie_trace where)
{
	trace_fscache_cookie(cookie->debug_id, refcount_read(&cookie->ref),
			     where);
}

/*
 * fscache-main.c
 */
extern unsigned int fscache_hash(unsigned int salt, const void *data, size_t len);
#ifdef CONFIG_FSCACHE
int __init fscache_init(void);
void __exit fscache_exit(void);
#else
static inline int fscache_init(void) { return 0; }
static inline void fscache_exit(void) {}
#endif

/*
 * fscache-proc.c
 */
#ifdef CONFIG_PROC_FS
extern int __init fscache_proc_init(void);
extern void fscache_proc_cleanup(void);
#else
#define fscache_proc_init()	(0)
#define fscache_proc_cleanup()	do {} while (0)
#endif

/*
 * fscache-stats.c
 */
#ifdef CONFIG_FSCACHE_STATS
extern atomic_t fscache_n_volumes;
extern atomic_t fscache_n_volumes_collision;
extern atomic_t fscache_n_volumes_nomem;
extern atomic_t fscache_n_cookies;
extern atomic_t fscache_n_cookies_lru;
extern atomic_t fscache_n_cookies_lru_expired;
extern atomic_t fscache_n_cookies_lru_removed;
extern atomic_t fscache_n_cookies_lru_dropped;

extern atomic_t fscache_n_acquires;
extern atomic_t fscache_n_acquires_ok;
extern atomic_t fscache_n_acquires_oom;

extern atomic_t fscache_n_invalidates;

extern atomic_t fscache_n_relinquishes;
extern atomic_t fscache_n_relinquishes_retire;
extern atomic_t fscache_n_relinquishes_dropped;

extern atomic_t fscache_n_resizes;
extern atomic_t fscache_n_resizes_null;

static inline void fscache_stat(atomic_t *stat)
{
	atomic_inc(stat);
}

static inline void fscache_stat_d(atomic_t *stat)
{
	atomic_dec(stat);
}

#define __fscache_stat(stat) (stat)

int fscache_stats_show(struct seq_file *m);
#else

#define __fscache_stat(stat) (NULL)
#define fscache_stat(stat) do {} while (0)
#define fscache_stat_d(stat) do {} while (0)

static inline int fscache_stats_show(struct seq_file *m) { return 0; }
#endif
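
/*
 * Illustrative sketch only: unlike fscache_stat(), __fscache_stat() yields
 * the counter's address (or NULL when stats are compiled out), so a caller
 * holding a maybe-absent counter must NULL-check it, e.g.:
 *
 *	atomic_t *counter = __fscache_stat(&fscache_n_volumes);
 *
 *	if (counter)
 *		atomic_inc(counter);
 */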

/*
 * fscache-volume.c
 */
#ifdef CONFIG_PROC_FS
extern const struct seq_operations fscache_volumes_seq_ops;
#endif

struct fscache_volume *fscache_get_volume(struct fscache_volume *volume,
					  enum fscache_volume_trace where);
bool fscache_begin_volume_access(struct fscache_volume *volume,
				 struct fscache_cookie *cookie,
				 enum fscache_access_trace why);
void fscache_create_volume(struct fscache_volume *volume, bool wait);

/*****************************************************************************/
/*
 * debug tracing
 */
#define dbgprintk(FMT, ...) \
	printk("[%-6.6s] "FMT"\n", current->comm, ##__VA_ARGS__)

#define kenter(FMT, ...) dbgprintk("==> %s("FMT")", __func__, ##__VA_ARGS__)
#define kleave(FMT, ...) dbgprintk("<== %s()"FMT"", __func__, ##__VA_ARGS__)
#define kdebug(FMT, ...) dbgprintk(FMT, ##__VA_ARGS__)

#ifdef __KDEBUG
#define _enter(FMT, ...) kenter(FMT, ##__VA_ARGS__)
#define _leave(FMT, ...) kleave(FMT, ##__VA_ARGS__)
#define _debug(FMT, ...) kdebug(FMT, ##__VA_ARGS__)

#elif defined(CONFIG_NETFS_DEBUG)
#define _enter(FMT, ...)			\
do {						\
	if (netfs_debug)			\
		kenter(FMT, ##__VA_ARGS__);	\
} while (0)

#define _leave(FMT, ...)			\
do {						\
	if (netfs_debug)			\
		kleave(FMT, ##__VA_ARGS__);	\
} while (0)

#define _debug(FMT, ...)			\
do {						\
	if (netfs_debug)			\
		kdebug(FMT, ##__VA_ARGS__);	\
} while (0)

#else
#define _enter(FMT, ...) no_printk("==> %s("FMT")", __func__, ##__VA_ARGS__)
#define _leave(FMT, ...) no_printk("<== %s()"FMT"", __func__, ##__VA_ARGS__)
#define _debug(FMT, ...) no_printk(FMT, ##__VA_ARGS__)
#endif
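
/*
 * Illustrative sketch only: a function instrumented with these macros
 * (the function name here is hypothetical) would log entry and exit as:
 *
 *	static int netfs_example(struct netfs_io_request *rreq)
 *	{
 *		_enter("R=%x", rreq->debug_id);
 *		...
 *		_leave(" = 0");
 *		return 0;
 *	}
 *
 * With __KDEBUG the output is unconditional; with CONFIG_NETFS_DEBUG it is
 * gated on the netfs_debug module parameter; otherwise it compiles away via
 * no_printk(), which still typechecks the format arguments.
 */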

/*
 * assertions
 */
#if 1 /* defined(__KDEBUGALL) */

#define ASSERT(X)							\
do {									\
	if (unlikely(!(X))) {						\
		pr_err("\n");						\
		pr_err("Assertion failed\n");				\
		BUG();							\
	}								\
} while (0)

#define ASSERTCMP(X, OP, Y)						\
do {									\
	if (unlikely(!((X) OP (Y)))) {					\
		pr_err("\n");						\
		pr_err("Assertion failed\n");				\
		pr_err("%lx " #OP " %lx is false\n",			\
		       (unsigned long)(X), (unsigned long)(Y));		\
		BUG();							\
	}								\
} while (0)

#define ASSERTIF(C, X)							\
do {									\
	if (unlikely((C) && !(X))) {					\
		pr_err("\n");						\
		pr_err("Assertion failed\n");				\
		BUG();							\
	}								\
} while (0)

#define ASSERTIFCMP(C, X, OP, Y)					\
do {									\
	if (unlikely((C) && !((X) OP (Y)))) {				\
		pr_err("\n");						\
		pr_err("Assertion failed\n");				\
		pr_err("%lx " #OP " %lx is false\n",			\
		       (unsigned long)(X), (unsigned long)(Y));		\
		BUG();							\
	}								\
} while (0)

#else

#define ASSERT(X)			do {} while (0)
#define ASSERTCMP(X, OP, Y)		do {} while (0)
#define ASSERTIF(C, X)			do {} while (0)
#define ASSERTIFCMP(C, X, OP, Y)	do {} while (0)

#endif /* assert or not */
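
/*
 * Illustrative sketch only: ASSERTCMP() stringifies the operator and prints
 * both operands as hex before BUG()ing, so a bounds check might read:
 *
 *	ASSERTCMP(subreq->len, <=, rreq->len);	// hypothetical invariant
 *
 * ASSERTIFCMP() only evaluates the comparison when its condition is true.
 */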