/* SPDX-License-Identifier: GPL-2.0 */
/*
 * include/linux/backing-dev.h
 *
 * low-level device information and state which is propagated up through
 * to high-level code.
 */

#ifndef _LINUX_BACKING_DEV_H
#define _LINUX_BACKING_DEV_H

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/writeback.h>
#include <linux/backing-dev-defs.h>
#include <linux/slab.h>

static inline struct backing_dev_info *bdi_get(struct backing_dev_info *bdi)
{
	kref_get(&bdi->refcnt);
	return bdi;
}

struct backing_dev_info *bdi_get_by_id(u64 id);
void bdi_put(struct backing_dev_info *bdi);
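
/*
 * Example (a minimal sketch): pin a bdi across an operation during which
 * the last reference might otherwise be dropped; every successful
 * bdi_get() must be balanced by bdi_put():
 *
 *	bdi = bdi_get(bdi);
 *	...
 *	bdi_put(bdi);
 */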

__printf(2, 3)
int bdi_register(struct backing_dev_info *bdi, const char *fmt, ...);
__printf(2, 0)
int bdi_register_va(struct backing_dev_info *bdi, const char *fmt,
		    va_list args);
void bdi_set_owner(struct backing_dev_info *bdi, struct device *owner);
void bdi_unregister(struct backing_dev_info *bdi);

struct backing_dev_info *bdi_alloc(int node_id);
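
/*
 * Example (a minimal sketch; "mydev" and @instance are hypothetical): the
 * usual lifecycle for a driver-owned bdi is allocate, register, and, on
 * teardown, unregister and drop the final reference:
 *
 *	bdi = bdi_alloc(NUMA_NO_NODE);
 *	if (!bdi)
 *		return -ENOMEM;
 *	err = bdi_register(bdi, "mydev%d", instance);
 *	if (err) {
 *		bdi_put(bdi);
 *		return err;
 *	}
 *	...
 *	bdi_unregister(bdi);
 *	bdi_put(bdi);
 */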

void wb_start_background_writeback(struct bdi_writeback *wb);
void wb_workfn(struct work_struct *work);

void wb_wait_for_completion(struct wb_completion *done);

extern spinlock_t bdi_lock;
extern struct list_head bdi_list;

extern struct workqueue_struct *bdi_wq;

static inline bool wb_has_dirty_io(struct bdi_writeback *wb)
{
	return test_bit(WB_has_dirty_io, &wb->state);
}

static inline bool bdi_has_dirty_io(struct backing_dev_info *bdi)
{
	/*
	 * @bdi->tot_write_bandwidth is guaranteed to be > 0 if there are
	 * any dirty wbs.  See wb_update_write_bandwidth().
	 */
	return atomic_long_read(&bdi->tot_write_bandwidth);
}

static inline void wb_stat_mod(struct bdi_writeback *wb,
				 enum wb_stat_item item, s64 amount)
{
	percpu_counter_add_batch(&wb->stat[item], amount, WB_STAT_BATCH);
}

static inline void inc_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	wb_stat_mod(wb, item, 1);
}

static inline void dec_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	wb_stat_mod(wb, item, -1);
}

static inline s64 wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	return percpu_counter_read_positive(&wb->stat[item]);
}

static inline s64 wb_stat_sum(struct bdi_writeback *wb, enum wb_stat_item item)
{
	return percpu_counter_sum_positive(&wb->stat[item]);
}
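
/*
 * Example (a minimal sketch): wb_stat() reads the batched per-CPU counter
 * cheaply but approximately, while wb_stat_sum() folds in all per-CPU
 * deltas for an exact value:
 *
 *	inc_wb_stat(wb, WB_WRITEBACK);
 *	...
 *	dec_wb_stat(wb, WB_WRITEBACK);
 *
 *	approx = wb_stat(wb, WB_RECLAIMABLE);
 *	exact = wb_stat_sum(wb, WB_RECLAIMABLE);
 */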

extern void wb_writeout_inc(struct bdi_writeback *wb);

/*
 * maximal error of a stat counter.
 */
static inline unsigned long wb_stat_error(void)
{
#ifdef CONFIG_SMP
	return nr_cpu_ids * WB_STAT_BATCH;
#else
	return 1;
#endif
}
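
/*
 * Example (a minimal sketch of the pattern the dirty throttling code
 * uses): when the threshold being compared against is within the error
 * bound of the batched counters, fall back to the exact sums:
 *
 *	if (thresh < 2 * wb_stat_error())
 *		dirty = wb_stat_sum(wb, WB_RECLAIMABLE);
 *	else
 *		dirty = wb_stat(wb, WB_RECLAIMABLE);
 */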

/* BDI ratio is expressed as parts per 1000000 for finer granularity. */
#define BDI_RATIO_SCALE 10000

u64 bdi_get_min_bytes(struct backing_dev_info *bdi);
u64 bdi_get_max_bytes(struct backing_dev_info *bdi);
int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio);
int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);
int bdi_set_min_ratio_no_scale(struct backing_dev_info *bdi, unsigned int min_ratio);
int bdi_set_max_ratio_no_scale(struct backing_dev_info *bdi, unsigned int max_ratio);
int bdi_set_min_bytes(struct backing_dev_info *bdi, u64 min_bytes);
int bdi_set_max_bytes(struct backing_dev_info *bdi, u64 max_bytes);
int bdi_set_strict_limit(struct backing_dev_info *bdi, unsigned int strict_limit);
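
/*
 * Example (a minimal sketch; the 5% figure is arbitrary): the plain
 * setters take whole percentages and scale internally, while the
 * _no_scale variants expect values already multiplied by BDI_RATIO_SCALE,
 * so the following two calls are equivalent:
 *
 *	err = bdi_set_max_ratio(bdi, 5);
 *	err = bdi_set_max_ratio_no_scale(bdi, 5 * BDI_RATIO_SCALE);
 */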

/*
 * Flags in backing_dev_info::capability
 *
 * BDI_CAP_WRITEBACK:		Supports dirty page writeback, and dirty pages
 *				should contribute to accounting
 * BDI_CAP_WRITEBACK_ACCT:	Automatically account writeback pages
 * BDI_CAP_STRICTLIMIT:		Keep number of dirty pages below bdi threshold
 */
#define BDI_CAP_WRITEBACK		(1 << 0)
#define BDI_CAP_WRITEBACK_ACCT		(1 << 1)
#define BDI_CAP_STRICTLIMIT		(1 << 2)
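
/*
 * Example (a minimal sketch): a filesystem that does its own writeback
 * accounting can clear the automatic-accounting bit on its bdi after
 * allocation:
 *
 *	bdi->capabilities &= ~BDI_CAP_WRITEBACK_ACCT;
 */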

extern struct backing_dev_info noop_backing_dev_info;

int bdi_init(struct backing_dev_info *bdi);

/**
 * writeback_in_progress - determine whether there is writeback in progress
 * @wb: bdi_writeback of interest
 *
 * Determine whether there is writeback waiting to be handled against a
 * bdi_writeback.
 */
static inline bool writeback_in_progress(struct bdi_writeback *wb)
{
	return test_bit(WB_writeback_running, &wb->state);
}
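
/*
 * Example (a minimal sketch of the pattern used when dirty pages pile
 * up): kick background writeback only if none is already running:
 *
 *	if (!writeback_in_progress(wb))
 *		wb_start_background_writeback(wb);
 */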

struct backing_dev_info *inode_to_bdi(struct inode *inode);

static inline bool mapping_can_writeback(struct address_space *mapping)
{
	return inode_to_bdi(mapping->host)->capabilities & BDI_CAP_WRITEBACK;
}
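
/*
 * Example (a minimal sketch; assumes the i_pages lock is held, as
 * inode_to_wb() requires): dirty accounting is skipped for mappings whose
 * backing device doesn't do writeback:
 *
 *	if (mapping_can_writeback(mapping))
 *		inc_wb_stat(inode_to_wb(mapping->host), WB_RECLAIMABLE);
 */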

#ifdef CONFIG_CGROUP_WRITEBACK

struct bdi_writeback *wb_get_lookup(struct backing_dev_info *bdi,
				    struct cgroup_subsys_state *memcg_css);
struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi,
				    struct cgroup_subsys_state *memcg_css,
				    gfp_t gfp);
void wb_memcg_offline(struct mem_cgroup *memcg);
void wb_blkcg_offline(struct cgroup_subsys_state *css);

/**
 * inode_cgwb_enabled - test whether cgroup writeback is enabled on an inode
 * @inode: inode of interest
 *
 * Cgroup writeback requires support from the filesystem.  Also, both memcg and
 * iocg have to be on the default hierarchy.  Test whether all conditions are
 * met.
 *
 * Note that the test result may change dynamically on the same inode
 * depending on how memcg and iocg are configured.
 */
static inline bool inode_cgwb_enabled(struct inode *inode)
{
	struct backing_dev_info *bdi = inode_to_bdi(inode);

	return cgroup_subsys_on_dfl(memory_cgrp_subsys) &&
		cgroup_subsys_on_dfl(io_cgrp_subsys) &&
		(bdi->capabilities & BDI_CAP_WRITEBACK) &&
		(inode->i_sb->s_iflags & SB_I_CGROUPWB);
}
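
/*
 * Example (a minimal sketch): a filesystem opts into cgroup writeback by
 * setting the superblock flag tested above, typically while filling the
 * superblock:
 *
 *	sb->s_iflags |= SB_I_CGROUPWB;
 */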

/**
 * wb_find_current - find wb for %current on a bdi
 * @bdi: bdi of interest
 *
 * Find the wb of @bdi which matches both the memcg and blkcg of %current.
 * Must be called under rcu_read_lock() which protects the returned wb.
 * Returns NULL if not found.
 */
static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi)
{
	struct cgroup_subsys_state *memcg_css;
	struct bdi_writeback *wb;

	memcg_css = task_css(current, memory_cgrp_id);
	if (!memcg_css->parent)
		return &bdi->wb;

	wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);

	/*
	 * %current's blkcg equals the effective blkcg of its memcg.  No
	 * need to use the relatively expensive cgroup_get_e_css().
	 */
	if (likely(wb && wb->blkcg_css == task_css(current, io_cgrp_id)))
		return wb;
	return NULL;
}

/**
 * wb_get_create_current - get or create wb for %current on a bdi
 * @bdi: bdi of interest
 * @gfp: allocation mask
 *
 * Equivalent to wb_get_create() on %current's memcg.  This function is
 * called from a relatively hot path and optimizes the common cases using
 * wb_find_current().
 */
static inline struct bdi_writeback *
wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
{
	struct bdi_writeback *wb;

	rcu_read_lock();
	wb = wb_find_current(bdi);
	if (wb && unlikely(!wb_tryget(wb)))
		wb = NULL;
	rcu_read_unlock();

	if (unlikely(!wb)) {
		struct cgroup_subsys_state *memcg_css;

		memcg_css = task_get_css(current, memory_cgrp_id);
		wb = wb_get_create(bdi, memcg_css, gfp);
		css_put(memcg_css);
	}
	return wb;
}
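
/*
 * Example (a minimal sketch): the wb returned by wb_get_create_current()
 * carries a reference which the caller must drop with wb_put():
 *
 *	wb = wb_get_create_current(bdi, GFP_ATOMIC);
 *	if (wb) {
 *		...
 *		wb_put(wb);
 *	}
 */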

/**
 * inode_to_wb - determine the wb of an inode
 * @inode: inode of interest
 *
 * Returns the wb @inode is currently associated with.  The caller must be
 * holding either @inode->i_lock, the i_pages lock, or the
 * associated wb's list_lock.
 */
static inline struct bdi_writeback *inode_to_wb(const struct inode *inode)
{
#ifdef CONFIG_LOCKDEP
	WARN_ON_ONCE(debug_locks &&
		     (!lockdep_is_held(&inode->i_lock) &&
		      !lockdep_is_held(&inode->i_mapping->i_pages.xa_lock) &&
		      !lockdep_is_held(&inode->i_wb->list_lock)));
#endif
	return inode->i_wb;
}

static inline struct bdi_writeback *inode_to_wb_wbc(
				struct inode *inode,
				struct writeback_control *wbc)
{
	/*
	 * If wbc does not have inode attached, it means cgroup writeback was
	 * disabled when wbc started. Just use the default wb in that case.
	 */
	return wbc->wb ? wbc->wb : &inode_to_bdi(inode)->wb;
}

/**
 * unlocked_inode_to_wb_begin - begin unlocked inode wb access transaction
 * @inode: target inode
 * @cookie: output param, to be passed to the end function
 *
 * The caller wants to access the wb associated with @inode but isn't
 * holding inode->i_lock, the i_pages lock or wb->list_lock.  This
 * function determines the wb associated with @inode and ensures that the
 * association doesn't change until the transaction is finished with
 * unlocked_inode_to_wb_end().
 *
 * The caller must call unlocked_inode_to_wb_end() with *@cookie afterwards and
 * can't sleep during the transaction.  IRQs may or may not be disabled on
 * return.
 */
static inline struct bdi_writeback *
unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie)
{
	rcu_read_lock();

	/*
	 * Paired with store_release in inode_switch_wbs_work_fn() and
	 * ensures that we see the new wb if we see cleared I_WB_SWITCH.
	 */
	cookie->locked = smp_load_acquire(&inode->i_state) & I_WB_SWITCH;

	if (unlikely(cookie->locked))
		xa_lock_irqsave(&inode->i_mapping->i_pages, cookie->flags);

	/*
	 * Protected by either !I_WB_SWITCH + rcu_read_lock() or the i_pages
	 * lock.  inode_to_wb() will bark.  Deref directly.
	 */
	return inode->i_wb;
}

/**
 * unlocked_inode_to_wb_end - end inode wb access transaction
 * @inode: target inode
 * @cookie: @cookie from unlocked_inode_to_wb_begin()
 */
static inline void unlocked_inode_to_wb_end(struct inode *inode,
					    struct wb_lock_cookie *cookie)
{
	if (unlikely(cookie->locked))
		xa_unlock_irqrestore(&inode->i_mapping->i_pages, cookie->flags);

	rcu_read_unlock();
}
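
/*
 * Example (a minimal sketch): a non-sleeping stat update against an
 * inode's wb without holding any of the locks inode_to_wb() requires:
 *
 *	struct wb_lock_cookie cookie = {};
 *	struct bdi_writeback *wb;
 *
 *	wb = unlocked_inode_to_wb_begin(inode, &cookie);
 *	inc_wb_stat(wb, WB_WRITTEN);
 *	unlocked_inode_to_wb_end(inode, &cookie);
 */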

#else	/* CONFIG_CGROUP_WRITEBACK */

static inline bool inode_cgwb_enabled(struct inode *inode)
{
	return false;
}

static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi)
{
	return &bdi->wb;
}

static inline struct bdi_writeback *
wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
{
	return &bdi->wb;
}

static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
{
	return &inode_to_bdi(inode)->wb;
}

static inline struct bdi_writeback *inode_to_wb_wbc(
				struct inode *inode,
				struct writeback_control *wbc)
{
	return inode_to_wb(inode);
}

static inline struct bdi_writeback *
unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie)
{
	return inode_to_wb(inode);
}

static inline void unlocked_inode_to_wb_end(struct inode *inode,
					    struct wb_lock_cookie *cookie)
{
}

static inline void wb_memcg_offline(struct mem_cgroup *memcg)
{
}

static inline void wb_blkcg_offline(struct cgroup_subsys_state *css)
{
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

const char *bdi_dev_name(struct backing_dev_info *bdi);

#endif	/* _LINUX_BACKING_DEV_H */