1  // SPDX-License-Identifier: GPL-2.0
2  /*
3   * dax: direct host memory access
4   * Copyright (C) 2020 Red Hat, Inc.
5   */
6  
7  #include "fuse_i.h"
8  
9  #include <linux/delay.h>
10  #include <linux/dax.h>
11  #include <linux/uio.h>
12  #include <linux/pagemap.h>
13  #include <linux/pfn_t.h>
14  #include <linux/iomap.h>
15  #include <linux/interval_tree.h>
16  
17  /*
18   * Default memory range size.  A power of 2 so it agrees with common FUSE_INIT
19   * map_alignment values 4KB and 64KB.
20   */
21  #define FUSE_DAX_SHIFT	21
22  #define FUSE_DAX_SZ	(1 << FUSE_DAX_SHIFT)
23  #define FUSE_DAX_PAGES	(FUSE_DAX_SZ / PAGE_SIZE)
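/*
 * With FUSE_DAX_SHIFT of 21 each mapping range covers 2 MiB of the DAX
 * window, i.e. 512 pages with 4 KiB pages (fewer with larger page sizes).
 */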
24  
25  /* Number of ranges reclaimer will try to free in one invocation */
26  #define FUSE_DAX_RECLAIM_CHUNK		(10)
27  
28  /*
29   * Dax memory reclaim threshold as a percentage of total ranges. When the
30   * number of free ranges drops below this threshold, reclaim can trigger.
31   * Default is 20%.
32   */
33  #define FUSE_DAX_RECLAIM_THRESHOLD	(20)
34  
35  /** Translation information for file offsets to DAX window offsets */
36  struct fuse_dax_mapping {
37  	/* Pointer to inode where this memory range is mapped */
38  	struct inode *inode;
39  
40  	/* Will connect in fcd->free_ranges to keep track of free memory */
41  	struct list_head list;
42  
43  	/* For interval tree in file/inode */
44  	struct interval_tree_node itn;
45  
46  	/* Will connect in fcd->busy_ranges to keep track of busy memory */
47  	struct list_head busy_list;
48  
49  	/** Position in DAX window */
50  	u64 window_offset;
51  
52  	/** Length of mapping, in bytes */
53  	loff_t length;
54  
55  	/* Is this mapping read-only or read-write */
56  	bool writable;
57  
58  	/* reference count when the mapping is used by dax iomap. */
59  	refcount_t refcnt;
60  };
61  
62  /* Per-inode dax map */
63  struct fuse_inode_dax {
64  	/* Semaphore to protect modifications to the dmap tree */
65  	struct rw_semaphore sem;
66  
67  	/* Sorted rb tree of struct fuse_dax_mapping elements */
68  	struct rb_root_cached tree;
69  	unsigned long nr;
70  };
71  
72  struct fuse_conn_dax {
73  	/* DAX device */
74  	struct dax_device *dev;
75  
76  	/* Lock protecting accesses to members of this structure */
77  	spinlock_t lock;
78  
79  	/* List of memory ranges which are busy */
80  	unsigned long nr_busy_ranges;
81  	struct list_head busy_ranges;
82  
83  	/* Worker to free up memory ranges */
84  	struct delayed_work free_work;
85  
86  	/* Wait queue for a dax range to become free */
87  	wait_queue_head_t range_waitq;
88  
89  	/* DAX Window Free Ranges */
90  	long nr_free_ranges;
91  	struct list_head free_ranges;
92  
93  	unsigned long nr_ranges;
94  };
95  
96  static inline struct fuse_dax_mapping *
97  node_to_dmap(struct interval_tree_node *node)
98  {
99  	if (!node)
100  		return NULL;
101  
102  	return container_of(node, struct fuse_dax_mapping, itn);
103  }
104  
105  static struct fuse_dax_mapping *
106  alloc_dax_mapping_reclaim(struct fuse_conn_dax *fcd, struct inode *inode);
107  
108  static void
109  __kick_dmap_free_worker(struct fuse_conn_dax *fcd, unsigned long delay_ms)
110  {
111  	unsigned long free_threshold;
112  
113  	/* If the number of free ranges is below the threshold, start reclaim */
114  	free_threshold = max_t(unsigned long, fcd->nr_ranges * FUSE_DAX_RECLAIM_THRESHOLD / 100,
115  			     1);
116  	if (fcd->nr_free_ranges < free_threshold)
117  		queue_delayed_work(system_long_wq, &fcd->free_work,
118  				   msecs_to_jiffies(delay_ms));
119  }
120  
121  static void kick_dmap_free_worker(struct fuse_conn_dax *fcd,
122  				  unsigned long delay_ms)
123  {
124  	spin_lock(&fcd->lock);
125  	__kick_dmap_free_worker(fcd, delay_ms);
126  	spin_unlock(&fcd->lock);
127  }
128  
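/*
 * Pop the first range off fcd->free_ranges, if any, and kick the free
 * worker so reclaim can run once the pool drops below the threshold.
 * Returns NULL when no free range is available.
 */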
129  static struct fuse_dax_mapping *alloc_dax_mapping(struct fuse_conn_dax *fcd)
130  {
131  	struct fuse_dax_mapping *dmap;
132  
133  	spin_lock(&fcd->lock);
134  	dmap = list_first_entry_or_null(&fcd->free_ranges,
135  					struct fuse_dax_mapping, list);
136  	if (dmap) {
137  		list_del_init(&dmap->list);
138  		WARN_ON(fcd->nr_free_ranges <= 0);
139  		fcd->nr_free_ranges--;
140  	}
141  	__kick_dmap_free_worker(fcd, 0);
142  	spin_unlock(&fcd->lock);
143  
144  	return dmap;
145  }
146  
147  /* This assumes fcd->lock is held */
148  static void __dmap_remove_busy_list(struct fuse_conn_dax *fcd,
149  				    struct fuse_dax_mapping *dmap)
150  {
151  	list_del_init(&dmap->busy_list);
152  	WARN_ON(fcd->nr_busy_ranges == 0);
153  	fcd->nr_busy_ranges--;
154  }
155  
156  static void dmap_remove_busy_list(struct fuse_conn_dax *fcd,
157  				  struct fuse_dax_mapping *dmap)
158  {
159  	spin_lock(&fcd->lock);
160  	__dmap_remove_busy_list(fcd, dmap);
161  	spin_unlock(&fcd->lock);
162  }
163  
164  /* This assumes fcd->lock is held */
165  static void __dmap_add_to_free_pool(struct fuse_conn_dax *fcd,
166  				struct fuse_dax_mapping *dmap)
167  {
168  	list_add_tail(&dmap->list, &fcd->free_ranges);
169  	fcd->nr_free_ranges++;
170  	wake_up(&fcd->range_waitq);
171  }
172  
173  static void dmap_add_to_free_pool(struct fuse_conn_dax *fcd,
174  				struct fuse_dax_mapping *dmap)
175  {
176  	/* Return fuse_dax_mapping to free list */
177  	spin_lock(&fcd->lock);
178  	__dmap_add_to_free_pool(fcd, dmap);
179  	spin_unlock(&fcd->lock);
180  }
181  
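/*
 * Ask the fuse daemon (FUSE_SETUPMAPPING) to map FUSE_DAX_SZ bytes of the
 * file at @start_idx into the DAX window range described by @dmap. Unless
 * this is an upgrade of an existing read-only mapping, the dmap is also
 * inserted into the inode's interval tree and the connection's busy list.
 */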
182  static int fuse_setup_one_mapping(struct inode *inode, unsigned long start_idx,
183  				  struct fuse_dax_mapping *dmap, bool writable,
184  				  bool upgrade)
185  {
186  	struct fuse_mount *fm = get_fuse_mount(inode);
187  	struct fuse_conn_dax *fcd = fm->fc->dax;
188  	struct fuse_inode *fi = get_fuse_inode(inode);
189  	struct fuse_setupmapping_in inarg;
190  	loff_t offset = start_idx << FUSE_DAX_SHIFT;
191  	FUSE_ARGS(args);
192  	ssize_t err;
193  
194  	WARN_ON(fcd->nr_free_ranges < 0);
195  
196  	/* Ask fuse daemon to setup mapping */
197  	memset(&inarg, 0, sizeof(inarg));
198  	inarg.foffset = offset;
199  	inarg.fh = -1;
200  	inarg.moffset = dmap->window_offset;
201  	inarg.len = FUSE_DAX_SZ;
202  	inarg.flags |= FUSE_SETUPMAPPING_FLAG_READ;
203  	if (writable)
204  		inarg.flags |= FUSE_SETUPMAPPING_FLAG_WRITE;
205  	args.opcode = FUSE_SETUPMAPPING;
206  	args.nodeid = fi->nodeid;
207  	args.in_numargs = 1;
208  	args.in_args[0].size = sizeof(inarg);
209  	args.in_args[0].value = &inarg;
210  	err = fuse_simple_request(fm, &args);
211  	if (err < 0)
212  		return err;
213  	dmap->writable = writable;
214  	if (!upgrade) {
215  		/*
216  		 * We don't take a reference on the inode. The inode is valid
217  		 * right now and when it is going away, the cleanup logic is
218  		 * expected to clean up dmap entries first.
219  		 */
220  		dmap->inode = inode;
221  		dmap->itn.start = dmap->itn.last = start_idx;
222  		/* Protected by fi->dax->sem */
223  		interval_tree_insert(&dmap->itn, &fi->dax->tree);
224  		fi->dax->nr++;
225  		spin_lock(&fcd->lock);
226  		list_add_tail(&dmap->busy_list, &fcd->busy_ranges);
227  		fcd->nr_busy_ranges++;
228  		spin_unlock(&fcd->lock);
229  	}
230  	return 0;
231  }
232  
233  static int fuse_send_removemapping(struct inode *inode,
234  				   struct fuse_removemapping_in *inargp,
235  				   struct fuse_removemapping_one *remove_one)
236  {
237  	struct fuse_inode *fi = get_fuse_inode(inode);
238  	struct fuse_mount *fm = get_fuse_mount(inode);
239  	FUSE_ARGS(args);
240  
241  	args.opcode = FUSE_REMOVEMAPPING;
242  	args.nodeid = fi->nodeid;
243  	args.in_numargs = 2;
244  	args.in_args[0].size = sizeof(*inargp);
245  	args.in_args[0].value = inargp;
246  	args.in_args[1].size = inargp->count * sizeof(*remove_one);
247  	args.in_args[1].value = remove_one;
248  	return fuse_simple_request(fm, &args);
249  }
250  
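/*
 * Send FUSE_REMOVEMAPPING for @num ranges on @to_remove, batching at most
 * FUSE_REMOVEMAPPING_MAX_ENTRY entries per request.
 */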
251  static int dmap_removemapping_list(struct inode *inode, unsigned int num,
252  				   struct list_head *to_remove)
253  {
254  	struct fuse_removemapping_one *remove_one, *ptr;
255  	struct fuse_removemapping_in inarg;
256  	struct fuse_dax_mapping *dmap;
257  	int ret, i = 0, nr_alloc;
258  
259  	nr_alloc = min_t(unsigned int, num, FUSE_REMOVEMAPPING_MAX_ENTRY);
260  	remove_one = kmalloc_array(nr_alloc, sizeof(*remove_one), GFP_NOFS);
261  	if (!remove_one)
262  		return -ENOMEM;
263  
264  	ptr = remove_one;
265  	list_for_each_entry(dmap, to_remove, list) {
266  		ptr->moffset = dmap->window_offset;
267  		ptr->len = dmap->length;
268  		ptr++;
269  		i++;
270  		num--;
271  		if (i >= nr_alloc || num == 0) {
272  			memset(&inarg, 0, sizeof(inarg));
273  			inarg.count = i;
274  			ret = fuse_send_removemapping(inode, &inarg,
275  						      remove_one);
276  			if (ret)
277  				goto out;
278  			ptr = remove_one;
279  			i = 0;
280  		}
281  	}
282  out:
283  	kfree(remove_one);
284  	return ret;
285  }
286  
287  /*
288   * Cleanup dmap entry and add back to free list. This should be called with
289   * fcd->lock held.
290   */
291  static void dmap_reinit_add_to_free_pool(struct fuse_conn_dax *fcd,
292  					    struct fuse_dax_mapping *dmap)
293  {
294  	pr_debug("fuse: freeing memory range start_idx=0x%lx end_idx=0x%lx window_offset=0x%llx length=0x%llx\n",
295  		 dmap->itn.start, dmap->itn.last, dmap->window_offset,
296  		 dmap->length);
297  	__dmap_remove_busy_list(fcd, dmap);
298  	dmap->inode = NULL;
299  	dmap->itn.start = dmap->itn.last = 0;
300  	__dmap_add_to_free_pool(fcd, dmap);
301  }
302  
303  /*
304   * Free inode dmap entries whose range falls inside [start, end].
305   * Does not take any locks. It should only be called from the
306   * evict_inode() path, where we know all dmap entries can be
307   * reclaimed.
308   */
309  static void inode_reclaim_dmap_range(struct fuse_conn_dax *fcd,
310  				     struct inode *inode,
311  				     loff_t start, loff_t end)
312  {
313  	struct fuse_inode *fi = get_fuse_inode(inode);
314  	struct fuse_dax_mapping *dmap, *n;
315  	int err, num = 0;
316  	LIST_HEAD(to_remove);
317  	unsigned long start_idx = start >> FUSE_DAX_SHIFT;
318  	unsigned long end_idx = end >> FUSE_DAX_SHIFT;
319  	struct interval_tree_node *node;
320  
321  	while (1) {
322  		node = interval_tree_iter_first(&fi->dax->tree, start_idx,
323  						end_idx);
324  		if (!node)
325  			break;
326  		dmap = node_to_dmap(node);
327  		/* inode is going away. There should not be any users of dmap */
328  		WARN_ON(refcount_read(&dmap->refcnt) > 1);
329  		interval_tree_remove(&dmap->itn, &fi->dax->tree);
330  		num++;
331  		list_add(&dmap->list, &to_remove);
332  	}
333  
334  	/* Nothing to remove */
335  	if (list_empty(&to_remove))
336  		return;
337  
338  	WARN_ON(fi->dax->nr < num);
339  	fi->dax->nr -= num;
340  	err = dmap_removemapping_list(inode, num, &to_remove);
341  	if (err && err != -ENOTCONN) {
342  		pr_warn("Failed to removemappings. start=0x%llx end=0x%llx\n",
343  			start, end);
344  	}
345  	spin_lock(&fcd->lock);
346  	list_for_each_entry_safe(dmap, n, &to_remove, list) {
347  		list_del_init(&dmap->list);
348  		dmap_reinit_add_to_free_pool(fcd, dmap);
349  	}
350  	spin_unlock(&fcd->lock);
351  }
352  
353  static int dmap_removemapping_one(struct inode *inode,
354  				  struct fuse_dax_mapping *dmap)
355  {
356  	struct fuse_removemapping_one forget_one;
357  	struct fuse_removemapping_in inarg;
358  
359  	memset(&inarg, 0, sizeof(inarg));
360  	inarg.count = 1;
361  	memset(&forget_one, 0, sizeof(forget_one));
362  	forget_one.moffset = dmap->window_offset;
363  	forget_one.len = dmap->length;
364  
365  	return fuse_send_removemapping(inode, &inarg, &forget_one);
366  }
367  
368  /*
369   * This is called from evict_inode(), by which time the inode is going away.
370   * So this function does not take any locks, such as fi->dax->sem, for
371   * traversing the fuse inode interval tree. If that lock were taken, the lock
372   * validator would complain of a deadlock situation w.r.t. the fs_reclaim lock.
373   */
374  void fuse_dax_inode_cleanup(struct inode *inode)
375  {
376  	struct fuse_conn *fc = get_fuse_conn(inode);
377  	struct fuse_inode *fi = get_fuse_inode(inode);
378  
379  	/*
380  	 * fuse_evict_inode() has already called truncate_inode_pages_final()
381  	 * before we arrive here. So we should not have to worry about any
382  	 * pages/exception entries still associated with inode.
383  	 */
384  	inode_reclaim_dmap_range(fc->dax, inode, 0, -1);
385  	WARN_ON(fi->dax->nr);
386  }
387  
388  static void fuse_fill_iomap_hole(struct iomap *iomap, loff_t length)
389  {
390  	iomap->addr = IOMAP_NULL_ADDR;
391  	iomap->length = length;
392  	iomap->type = IOMAP_HOLE;
393  }
394  
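/*
 * Fill @iomap for the part of [pos, pos + length) covered by @dmap,
 * translating the file offset into an offset in the DAX window. Takes a
 * reference on @dmap (stashed in iomap->private) which is dropped in
 * fuse_iomap_end(). A range beyond i_size is reported as a hole.
 */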
395  static void fuse_fill_iomap(struct inode *inode, loff_t pos, loff_t length,
396  			    struct iomap *iomap, struct fuse_dax_mapping *dmap,
397  			    unsigned int flags)
398  {
399  	loff_t offset, len;
400  	loff_t i_size = i_size_read(inode);
401  
402  	offset = pos - (dmap->itn.start << FUSE_DAX_SHIFT);
403  	len = min(length, dmap->length - offset);
404  
405  	/* If length is beyond end of file, truncate further */
406  	if (pos + len > i_size)
407  		len = i_size - pos;
408  
409  	if (len > 0) {
410  		iomap->addr = dmap->window_offset + offset;
411  		iomap->length = len;
412  		if (flags & IOMAP_FAULT)
413  			iomap->length = ALIGN(len, PAGE_SIZE);
414  		iomap->type = IOMAP_MAPPED;
415  		/*
416  		 * Increase refcnt so that the reclaim code knows this dmap is
417  		 * in use. This assumes fi->dax->sem is held either shared or
418  		 * exclusive.
419  		 */
420  		refcount_inc(&dmap->refcnt);
421  
422  		/* iomap->private should be NULL */
423  		WARN_ON_ONCE(iomap->private);
424  		iomap->private = dmap;
425  	} else {
426  		/* Mapping beyond end of file is hole */
427  		fuse_fill_iomap_hole(iomap, length);
428  	}
429  }
430  
431  static int fuse_setup_new_dax_mapping(struct inode *inode, loff_t pos,
432  				      loff_t length, unsigned int flags,
433  				      struct iomap *iomap)
434  {
435  	struct fuse_inode *fi = get_fuse_inode(inode);
436  	struct fuse_conn *fc = get_fuse_conn(inode);
437  	struct fuse_conn_dax *fcd = fc->dax;
438  	struct fuse_dax_mapping *dmap, *alloc_dmap = NULL;
439  	int ret;
440  	bool writable = flags & IOMAP_WRITE;
441  	unsigned long start_idx = pos >> FUSE_DAX_SHIFT;
442  	struct interval_tree_node *node;
443  
444  	/*
445  	 * Can't do inline reclaim in fault path. We call
446  	 * dax_layout_busy_page() before we free a range. And
447  	 * fuse_wait_dax_page() drops mapping->invalidate_lock and requires it.
448  	 * In fault path we enter with mapping->invalidate_lock held and can't
449  	 * drop it. Also in fault path we hold mapping->invalidate_lock shared
450  	 * and not exclusive, so that creates further issues with
451  	 * fuse_wait_dax_page().  Hence return -EAGAIN and fuse_dax_fault()
452  	 * will wait for a memory range to become free and retry.
453  	 */
454  	if (flags & IOMAP_FAULT) {
455  		alloc_dmap = alloc_dax_mapping(fcd);
456  		if (!alloc_dmap)
457  			return -EAGAIN;
458  	} else {
459  		alloc_dmap = alloc_dax_mapping_reclaim(fcd, inode);
460  		if (IS_ERR(alloc_dmap))
461  			return PTR_ERR(alloc_dmap);
462  	}
463  
464  	/* If we are here, we should have memory allocated */
465  	if (WARN_ON(!alloc_dmap))
466  		return -EIO;
467  
468  	/*
469  	 * Take the write lock so that only one caller can try to set up the
470  	 * mapping and others wait.
471  	 */
472  	down_write(&fi->dax->sem);
473  	/*
474  	 * We dropped the lock. Check again whether somebody else has already
475  	 * set up the mapping.
476  	 */
477  	node = interval_tree_iter_first(&fi->dax->tree, start_idx, start_idx);
478  	if (node) {
479  		dmap = node_to_dmap(node);
480  		fuse_fill_iomap(inode, pos, length, iomap, dmap, flags);
481  		dmap_add_to_free_pool(fcd, alloc_dmap);
482  		up_write(&fi->dax->sem);
483  		return 0;
484  	}
485  
486  	/* Setup one mapping */
487  	ret = fuse_setup_one_mapping(inode, pos >> FUSE_DAX_SHIFT, alloc_dmap,
488  				     writable, false);
489  	if (ret < 0) {
490  		dmap_add_to_free_pool(fcd, alloc_dmap);
491  		up_write(&fi->dax->sem);
492  		return ret;
493  	}
494  	fuse_fill_iomap(inode, pos, length, iomap, alloc_dmap, flags);
495  	up_write(&fi->dax->sem);
496  	return 0;
497  }
498  
499  static int fuse_upgrade_dax_mapping(struct inode *inode, loff_t pos,
500  				    loff_t length, unsigned int flags,
501  				    struct iomap *iomap)
502  {
503  	struct fuse_inode *fi = get_fuse_inode(inode);
504  	struct fuse_dax_mapping *dmap;
505  	int ret;
506  	unsigned long idx = pos >> FUSE_DAX_SHIFT;
507  	struct interval_tree_node *node;
508  
509  	/*
510  	 * Take exclusive lock so that only one caller can try to setup
511  	 * mapping and others wait.
512  	 */
513  	down_write(&fi->dax->sem);
514  	node = interval_tree_iter_first(&fi->dax->tree, idx, idx);
515  
516  	/* We are holding either the inode lock or invalidate_lock, and that
517  	 * should ensure that dmap can't be truncated. We are holding a
518  	 * reference on dmap and that should make sure it can't be reclaimed.
519  	 * So dmap should still be in the tree despite the fact that we dropped
520  	 * and re-acquired the fi->dax->sem lock.
521  	 */
522  	ret = -EIO;
523  	if (WARN_ON(!node))
524  		goto out_err;
525  
526  	dmap = node_to_dmap(node);
527  
528  	/* We took an extra reference on dmap to make sure it's not reclaimed.
529  	 * Now we hold fi->dax->sem lock and that reference is not needed
530  	 * anymore. Drop it.
531  	 */
532  	if (refcount_dec_and_test(&dmap->refcnt)) {
533  		/* refcount should not hit 0. This object only goes
534  		 * away when fuse connection goes away
535  		 */
536  		WARN_ON_ONCE(1);
537  	}
538  
539  	/* Maybe another thread already upgraded the mapping while we were not
540  	 * holding the lock.
541  	 */
542  	if (dmap->writable) {
543  		ret = 0;
544  		goto out_fill_iomap;
545  	}
546  
547  	ret = fuse_setup_one_mapping(inode, pos >> FUSE_DAX_SHIFT, dmap, true,
548  				     true);
549  	if (ret < 0)
550  		goto out_err;
551  out_fill_iomap:
552  	fuse_fill_iomap(inode, pos, length, iomap, dmap, flags);
553  out_err:
554  	up_write(&fi->dax->sem);
555  	return ret;
556  }
557  
558  /* This is just for DAX and the mapping is ephemeral, do not use it for other
559   * purposes since there is no block device with a permanent mapping.
560   */
561  static int fuse_iomap_begin(struct inode *inode, loff_t pos, loff_t length,
562  			    unsigned int flags, struct iomap *iomap,
563  			    struct iomap *srcmap)
564  {
565  	struct fuse_inode *fi = get_fuse_inode(inode);
566  	struct fuse_conn *fc = get_fuse_conn(inode);
567  	struct fuse_dax_mapping *dmap;
568  	bool writable = flags & IOMAP_WRITE;
569  	unsigned long start_idx = pos >> FUSE_DAX_SHIFT;
570  	struct interval_tree_node *node;
571  
572  	/* We don't support FIEMAP */
573  	if (WARN_ON(flags & IOMAP_REPORT))
574  		return -EIO;
575  
576  	iomap->offset = pos;
577  	iomap->flags = 0;
578  	iomap->bdev = NULL;
579  	iomap->dax_dev = fc->dax->dev;
580  
581  	/*
582  	 * Both the read/write and mmap paths can race here. So we need something
583  	 * to make sure that if we are setting up a mapping, then the other path waits.
584  	 *
585  	 * For now, use a semaphore for this. It probably needs to be
586  	 * optimized later.
587  	 */
588  	down_read(&fi->dax->sem);
589  	node = interval_tree_iter_first(&fi->dax->tree, start_idx, start_idx);
590  	if (node) {
591  		dmap = node_to_dmap(node);
592  		if (writable && !dmap->writable) {
593  			/* Upgrade read-only mapping to read-write. This will
594  			 * require exclusive fi->dax->sem lock as we don't want
595  			 * two threads to be trying to do this simultaneously
596  			 * for the same dmap. So drop the shared lock and acquire
597  			 * the exclusive lock.
598  			 *
599  			 * Before dropping fi->dax->sem lock, take a reference
600  			 * on dmap so that it's not freed by range reclaim.
601  			 */
602  			refcount_inc(&dmap->refcnt);
603  			up_read(&fi->dax->sem);
604  			pr_debug("%s: Upgrading mapping at offset 0x%llx length 0x%llx\n",
605  				 __func__, pos, length);
606  			return fuse_upgrade_dax_mapping(inode, pos, length,
607  							flags, iomap);
608  		} else {
609  			fuse_fill_iomap(inode, pos, length, iomap, dmap, flags);
610  			up_read(&fi->dax->sem);
611  			return 0;
612  		}
613  	} else {
614  		up_read(&fi->dax->sem);
615  		pr_debug("%s: no mapping at offset 0x%llx length 0x%llx\n",
616  				__func__, pos, length);
617  		if (pos >= i_size_read(inode))
618  			goto iomap_hole;
619  
620  		return fuse_setup_new_dax_mapping(inode, pos, length, flags,
621  						  iomap);
622  	}
623  
624  	/*
625  	 * If a read beyond the end of the file happens, fs code seems to
626  	 * return it as a hole.
627  	 */
628  iomap_hole:
629  	fuse_fill_iomap_hole(iomap, length);
630  	pr_debug("%s returning hole mapping. pos=0x%llx length_asked=0x%llx length_returned=0x%llx\n",
631  		 __func__, pos, length, iomap->length);
632  	return 0;
633  }
634  
635  static int fuse_iomap_end(struct inode *inode, loff_t pos, loff_t length,
636  			  ssize_t written, unsigned int flags,
637  			  struct iomap *iomap)
638  {
639  	struct fuse_dax_mapping *dmap = iomap->private;
640  
641  	if (dmap) {
642  		if (refcount_dec_and_test(&dmap->refcnt)) {
643  			/* refcount should not hit 0. This object only goes
644  			 * away when fuse connection goes away
645  			 */
646  			WARN_ON_ONCE(1);
647  		}
648  	}
649  
650  	/* DAX writes beyond end-of-file aren't handled using iomap, so the
651  	 * file size is unchanged and there is nothing to do here.
652  	 */
653  	return 0;
654  }
655  
656  static const struct iomap_ops fuse_iomap_ops = {
657  	.iomap_begin = fuse_iomap_begin,
658  	.iomap_end = fuse_iomap_end,
659  };
660  
661  static void fuse_wait_dax_page(struct inode *inode)
662  {
663  	filemap_invalidate_unlock(inode->i_mapping);
664  	schedule();
665  	filemap_invalidate_lock(inode->i_mapping);
666  }
667  
668  /* Should be called with mapping->invalidate_lock held exclusively */
669  static int __fuse_dax_break_layouts(struct inode *inode, bool *retry,
670  				    loff_t start, loff_t end)
671  {
672  	struct page *page;
673  
674  	page = dax_layout_busy_page_range(inode->i_mapping, start, end);
675  	if (!page)
676  		return 0;
677  
678  	*retry = true;
679  	return ___wait_var_event(&page->_refcount,
680  			atomic_read(&page->_refcount) == 1, TASK_INTERRUPTIBLE,
681  			0, 0, fuse_wait_dax_page(inode));
682  }
683  
684  /* dmap_end == 0 leads to unmapping of whole file */
685  int fuse_dax_break_layouts(struct inode *inode, u64 dmap_start,
686  				  u64 dmap_end)
687  {
688  	bool	retry;
689  	int	ret;
690  
691  	do {
692  		retry = false;
693  		ret = __fuse_dax_break_layouts(inode, &retry, dmap_start,
694  					       dmap_end);
695  	} while (ret == 0 && retry);
696  
697  	return ret;
698  }
699  
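/*
 * Read path: serialize against writes and truncate with the shared inode
 * lock (trylock for IOCB_NOWAIT) and let dax_iomap_rw() drive fuse_iomap_ops.
 */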
700  ssize_t fuse_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
701  {
702  	struct inode *inode = file_inode(iocb->ki_filp);
703  	ssize_t ret;
704  
705  	if (iocb->ki_flags & IOCB_NOWAIT) {
706  		if (!inode_trylock_shared(inode))
707  			return -EAGAIN;
708  	} else {
709  		inode_lock_shared(inode);
710  	}
711  
712  	ret = dax_iomap_rw(iocb, to, &fuse_iomap_ops);
713  	inode_unlock_shared(inode);
714  
715  	/* TODO file_accessed(iocb->f_filp) */
716  	return ret;
717  }
718  
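/* Return true if this write starts at or extends beyond the current i_size */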
719  static bool file_extending_write(struct kiocb *iocb, struct iov_iter *from)
720  {
721  	struct inode *inode = file_inode(iocb->ki_filp);
722  
723  	return (iov_iter_rw(from) == WRITE &&
724  		((iocb->ki_pos) >= i_size_read(inode) ||
725  		  (iocb->ki_pos + iov_iter_count(from) > i_size_read(inode))));
726  }
727  
728  static ssize_t fuse_dax_direct_write(struct kiocb *iocb, struct iov_iter *from)
729  {
730  	struct inode *inode = file_inode(iocb->ki_filp);
731  	struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(iocb);
732  	ssize_t ret;
733  
734  	ret = fuse_direct_io(&io, from, &iocb->ki_pos, FUSE_DIO_WRITE);
735  
736  	fuse_write_update_attr(inode, iocb->ki_pos, ret);
737  	return ret;
738  }
739  
740  ssize_t fuse_dax_write_iter(struct kiocb *iocb, struct iov_iter *from)
741  {
742  	struct inode *inode = file_inode(iocb->ki_filp);
743  	ssize_t ret;
744  
745  	if (iocb->ki_flags & IOCB_NOWAIT) {
746  		if (!inode_trylock(inode))
747  			return -EAGAIN;
748  	} else {
749  		inode_lock(inode);
750  	}
751  
752  	ret = generic_write_checks(iocb, from);
753  	if (ret <= 0)
754  		goto out;
755  
756  	ret = file_remove_privs(iocb->ki_filp);
757  	if (ret)
758  		goto out;
759  	/* TODO file_update_time() but we don't want metadata I/O */
760  
761  	/* Do not use dax for file-extending writes, as the write and the
762  	 * on-disk i_size increase would otherwise not be atomic.
763  	 */
764  	if (file_extending_write(iocb, from))
765  		ret = fuse_dax_direct_write(iocb, from);
766  	else
767  		ret = dax_iomap_rw(iocb, from, &fuse_iomap_ops);
768  
769  out:
770  	inode_unlock(inode);
771  
772  	if (ret > 0)
773  		ret = generic_write_sync(iocb, ret);
774  	return ret;
775  }
776  
777  static int fuse_dax_writepages(struct address_space *mapping,
778  			       struct writeback_control *wbc)
779  {
780  
781  	struct inode *inode = mapping->host;
782  	struct fuse_conn *fc = get_fuse_conn(inode);
783  
784  	return dax_writeback_mapping_range(mapping, fc->dax->dev, wbc);
785  }
786  
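/*
 * Common fault handler. Faults are served under a shared
 * mapping->invalidate_lock to serialize against truncate and range reclaim.
 * If no free range is available, the iomap_begin path fails with -EAGAIN;
 * in that case wait for a range to be freed and retry the fault.
 */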
787  static vm_fault_t __fuse_dax_fault(struct vm_fault *vmf, unsigned int order,
788  		bool write)
789  {
790  	vm_fault_t ret;
791  	struct inode *inode = file_inode(vmf->vma->vm_file);
792  	struct super_block *sb = inode->i_sb;
793  	pfn_t pfn;
794  	int error = 0;
795  	struct fuse_conn *fc = get_fuse_conn(inode);
796  	struct fuse_conn_dax *fcd = fc->dax;
797  	bool retry = false;
798  
799  	if (write)
800  		sb_start_pagefault(sb);
801  retry:
802  	if (retry && !(fcd->nr_free_ranges > 0))
803  		wait_event(fcd->range_waitq, (fcd->nr_free_ranges > 0));
804  
805  	/*
806  	 * We need to serialize against not only truncate but also against
807  	 * fuse dax memory range reclaim. While a range is being reclaimed,
808  	 * we do not want any read/write/mmap to make progress and try
809  	 * to populate page cache or access memory we are trying to free.
810  	 */
811  	filemap_invalidate_lock_shared(inode->i_mapping);
812  	ret = dax_iomap_fault(vmf, order, &pfn, &error, &fuse_iomap_ops);
813  	if ((ret & VM_FAULT_ERROR) && error == -EAGAIN) {
814  		error = 0;
815  		retry = true;
816  		filemap_invalidate_unlock_shared(inode->i_mapping);
817  		goto retry;
818  	}
819  
820  	if (ret & VM_FAULT_NEEDDSYNC)
821  		ret = dax_finish_sync_fault(vmf, order, pfn);
822  	filemap_invalidate_unlock_shared(inode->i_mapping);
823  
824  	if (write)
825  		sb_end_pagefault(sb);
826  
827  	return ret;
828  }
829  
830  static vm_fault_t fuse_dax_fault(struct vm_fault *vmf)
831  {
832  	return __fuse_dax_fault(vmf, 0, vmf->flags & FAULT_FLAG_WRITE);
833  }
834  
835  static vm_fault_t fuse_dax_huge_fault(struct vm_fault *vmf, unsigned int order)
836  {
837  	return __fuse_dax_fault(vmf, order, vmf->flags & FAULT_FLAG_WRITE);
838  }
839  
840  static vm_fault_t fuse_dax_page_mkwrite(struct vm_fault *vmf)
841  {
842  	return __fuse_dax_fault(vmf, 0, true);
843  }
844  
845  static vm_fault_t fuse_dax_pfn_mkwrite(struct vm_fault *vmf)
846  {
847  	return __fuse_dax_fault(vmf, 0, true);
848  }
849  
850  static const struct vm_operations_struct fuse_dax_vm_ops = {
851  	.fault		= fuse_dax_fault,
852  	.huge_fault	= fuse_dax_huge_fault,
853  	.page_mkwrite	= fuse_dax_page_mkwrite,
854  	.pfn_mkwrite	= fuse_dax_pfn_mkwrite,
855  };
856  
857  int fuse_dax_mmap(struct file *file, struct vm_area_struct *vma)
858  {
859  	file_accessed(file);
860  	vma->vm_ops = &fuse_dax_vm_ops;
861  	vm_flags_set(vma, VM_MIXEDMAP | VM_HUGEPAGE);
862  	return 0;
863  }
864  
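/*
 * Write back dirty pages covering this range and invalidate them from the
 * page cache before the range is reclaimed.
 */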
865  static int dmap_writeback_invalidate(struct inode *inode,
866  				     struct fuse_dax_mapping *dmap)
867  {
868  	int ret;
869  	loff_t start_pos = dmap->itn.start << FUSE_DAX_SHIFT;
870  	loff_t end_pos = (start_pos + FUSE_DAX_SZ - 1);
871  
872  	ret = filemap_fdatawrite_range(inode->i_mapping, start_pos, end_pos);
873  	if (ret) {
874  		pr_debug("fuse: filemap_fdatawrite_range() failed. err=%d start_pos=0x%llx, end_pos=0x%llx\n",
875  			 ret, start_pos, end_pos);
876  		return ret;
877  	}
878  
879  	ret = invalidate_inode_pages2_range(inode->i_mapping,
880  					    start_pos >> PAGE_SHIFT,
881  					    end_pos >> PAGE_SHIFT);
882  	if (ret)
883  		pr_debug("fuse: invalidate_inode_pages2_range() failed err=%d\n",
884  			 ret);
885  
886  	return ret;
887  }
888  
889  static int reclaim_one_dmap_locked(struct inode *inode,
890  				   struct fuse_dax_mapping *dmap)
891  {
892  	int ret;
893  	struct fuse_inode *fi = get_fuse_inode(inode);
894  
895  	/*
896  	 * igrab() was done to make sure the inode won't go away under us, and
897  	 * this further avoids the race with evict().
898  	 */
899  	ret = dmap_writeback_invalidate(inode, dmap);
900  	if (ret)
901  		return ret;
902  
903  	/* Remove dax mapping from inode interval tree now */
904  	interval_tree_remove(&dmap->itn, &fi->dax->tree);
905  	fi->dax->nr--;
906  
907  	/* It is possible that umount/shutdown has killed the fuse connection
908  	 * and the worker thread is trying to reclaim memory in parallel. Don't
909  	 * warn in that case.
910  	 */
911  	ret = dmap_removemapping_one(inode, dmap);
912  	if (ret && ret != -ENOTCONN) {
913  		pr_warn("Failed to remove mapping. offset=0x%llx len=0x%llx ret=%d\n",
914  			dmap->window_offset, dmap->length, ret);
915  	}
916  	return 0;
917  }
918  
919  /* Find the first mapped dmap for an inode and return it. The caller needs
920   * to hold the fi->dax->sem lock, either shared or exclusive.
921   */
922  static struct fuse_dax_mapping *inode_lookup_first_dmap(struct inode *inode)
923  {
924  	struct fuse_inode *fi = get_fuse_inode(inode);
925  	struct fuse_dax_mapping *dmap;
926  	struct interval_tree_node *node;
927  
928  	for (node = interval_tree_iter_first(&fi->dax->tree, 0, -1); node;
929  	     node = interval_tree_iter_next(node, 0, -1)) {
930  		dmap = node_to_dmap(node);
931  		/* still in use. */
932  		if (refcount_read(&dmap->refcnt) > 1)
933  			continue;
934  
935  		return dmap;
936  	}
937  
938  	return NULL;
939  }
940  
941  /*
942   * Find the first mapping in the tree, free it, and return it. Do not add
943   * it back to the free pool.
944   */
945  static struct fuse_dax_mapping *
946  inode_inline_reclaim_one_dmap(struct fuse_conn_dax *fcd, struct inode *inode,
947  			      bool *retry)
948  {
949  	struct fuse_inode *fi = get_fuse_inode(inode);
950  	struct fuse_dax_mapping *dmap;
951  	u64 dmap_start, dmap_end;
952  	unsigned long start_idx;
953  	int ret;
954  	struct interval_tree_node *node;
955  
956  	filemap_invalidate_lock(inode->i_mapping);
957  
958  	/* Lookup a dmap and corresponding file offset to reclaim. */
959  	down_read(&fi->dax->sem);
960  	dmap = inode_lookup_first_dmap(inode);
961  	if (dmap) {
962  		start_idx = dmap->itn.start;
963  		dmap_start = start_idx << FUSE_DAX_SHIFT;
964  		dmap_end = dmap_start + FUSE_DAX_SZ - 1;
965  	}
966  	up_read(&fi->dax->sem);
967  
968  	if (!dmap)
969  		goto out_mmap_sem;
970  	/*
971  	 * Make sure there are no references to inode pages using
972  	 * get_user_pages()
973  	 */
974  	ret = fuse_dax_break_layouts(inode, dmap_start, dmap_end);
975  	if (ret) {
976  		pr_debug("fuse: fuse_dax_break_layouts() failed. err=%d\n",
977  			 ret);
978  		dmap = ERR_PTR(ret);
979  		goto out_mmap_sem;
980  	}
981  
982  	down_write(&fi->dax->sem);
983  	node = interval_tree_iter_first(&fi->dax->tree, start_idx, start_idx);
984  	/* Range already got reclaimed by somebody else */
985  	if (!node) {
986  		if (retry)
987  			*retry = true;
988  		goto out_write_dmap_sem;
989  	}
990  
991  	dmap = node_to_dmap(node);
992  	/* still in use. */
993  	if (refcount_read(&dmap->refcnt) > 1) {
994  		dmap = NULL;
995  		if (retry)
996  			*retry = true;
997  		goto out_write_dmap_sem;
998  	}
999  
1000  	ret = reclaim_one_dmap_locked(inode, dmap);
1001  	if (ret < 0) {
1002  		dmap = ERR_PTR(ret);
1003  		goto out_write_dmap_sem;
1004  	}
1005  
1006  	/* Clean up dmap. Do not add back to free list */
1007  	dmap_remove_busy_list(fcd, dmap);
1008  	dmap->inode = NULL;
1009  	dmap->itn.start = dmap->itn.last = 0;
1010  
1011  	pr_debug("fuse: %s: inline reclaimed memory range. inode=%p, window_offset=0x%llx, length=0x%llx\n",
1012  		 __func__, inode, dmap->window_offset, dmap->length);
1013  
1014  out_write_dmap_sem:
1015  	up_write(&fi->dax->sem);
1016  out_mmap_sem:
1017  	filemap_invalidate_unlock(inode->i_mapping);
1018  	return dmap;
1019  }
1020  
1021  static struct fuse_dax_mapping *
1022  alloc_dax_mapping_reclaim(struct fuse_conn_dax *fcd, struct inode *inode)
1023  {
1024  	struct fuse_dax_mapping *dmap;
1025  	struct fuse_inode *fi = get_fuse_inode(inode);
1026  
1027  	while (1) {
1028  		bool retry = false;
1029  
1030  		dmap = alloc_dax_mapping(fcd);
1031  		if (dmap)
1032  			return dmap;
1033  
1034  		dmap = inode_inline_reclaim_one_dmap(fcd, inode, &retry);
1035  		/*
1036  		 * Either we got a mapping or it is an error; return in both
1037  		 * cases.
1038  		 */
1039  		if (dmap)
1040  			return dmap;
1041  
1042  		/* If we could not reclaim a mapping because it
1043  		 * had a reference or some other temporary failure,
1044  		 * try again. We want to give up inline reclaim only
1045  		 * if there is no range assigned to this node. Otherwise
1046  		 * a deadlock is possible if we sleep with
1047  		 * mapping->invalidate_lock held while the worker that frees
1048  		 * memory can't make progress because mapping->invalidate_lock
1049  		 * is unavailable. So sleep only if fi->dax->nr == 0.
1050  		 */
1051  		if (retry)
1052  			continue;
1053  		/*
1054  		 * There are no mappings which can be reclaimed. Wait for one.
1055  		 * We are not holding fi->dax->sem. So it is possible
1056  		 * that a range gets added now. But as we are not holding
1057  		 * mapping->invalidate_lock, the worker should still be able to
1058  		 * free up a range and wake us up.
1059  		 */
1060  		if (!fi->dax->nr && !(fcd->nr_free_ranges > 0)) {
1061  			if (wait_event_killable_exclusive(fcd->range_waitq,
1062  					(fcd->nr_free_ranges > 0))) {
1063  				return ERR_PTR(-EINTR);
1064  			}
1065  		}
1066  	}
1067  }
1068  
1069  static int lookup_and_reclaim_dmap_locked(struct fuse_conn_dax *fcd,
1070  					  struct inode *inode,
1071  					  unsigned long start_idx)
1072  {
1073  	int ret;
1074  	struct fuse_inode *fi = get_fuse_inode(inode);
1075  	struct fuse_dax_mapping *dmap;
1076  	struct interval_tree_node *node;
1077  
1078  	/* Find the fuse dax mapping at this file offset in the inode. */
1079  	node = interval_tree_iter_first(&fi->dax->tree, start_idx, start_idx);
1080  
1081  	/* Range already got cleaned up by somebody else */
1082  	if (!node)
1083  		return 0;
1084  	dmap = node_to_dmap(node);
1085  
1086  	/* still in use. */
1087  	if (refcount_read(&dmap->refcnt) > 1)
1088  		return 0;
1089  
1090  	ret = reclaim_one_dmap_locked(inode, dmap);
1091  	if (ret < 0)
1092  		return ret;
1093  
1094  	/* Cleanup dmap entry and add back to free list */
1095  	spin_lock(&fcd->lock);
1096  	dmap_reinit_add_to_free_pool(fcd, dmap);
1097  	spin_unlock(&fcd->lock);
1098  	return ret;
1099  }
1100  
1101  /*
1102   * Free a range of memory.
1103   * Locking:
1104   * 1. Take mapping->invalidate_lock to block dax faults.
1105   * 2. Take fi->dax->sem to protect interval tree and also to make sure
1106   *    read/write can not reuse a dmap which we might be freeing.
1107   */
1108  static int lookup_and_reclaim_dmap(struct fuse_conn_dax *fcd,
1109  				   struct inode *inode,
1110  				   unsigned long start_idx,
1111  				   unsigned long end_idx)
1112  {
1113  	int ret;
1114  	struct fuse_inode *fi = get_fuse_inode(inode);
1115  	loff_t dmap_start = start_idx << FUSE_DAX_SHIFT;
1116  	loff_t dmap_end = (dmap_start + FUSE_DAX_SZ) - 1;
1117  
1118  	filemap_invalidate_lock(inode->i_mapping);
1119  	ret = fuse_dax_break_layouts(inode, dmap_start, dmap_end);
1120  	if (ret) {
1121  		pr_debug("virtio_fs: fuse_dax_break_layouts() failed. err=%d\n",
1122  			 ret);
1123  		goto out_mmap_sem;
1124  	}
1125  
1126  	down_write(&fi->dax->sem);
1127  	ret = lookup_and_reclaim_dmap_locked(fcd, inode, start_idx);
1128  	up_write(&fi->dax->sem);
1129  out_mmap_sem:
1130  	filemap_invalidate_unlock(inode->i_mapping);
1131  	return ret;
1132  }
1133  
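/*
 * Reclaim up to @nr_to_free busy ranges, skipping ranges still in use and
 * inodes that are being evicted. Called from the free_work worker.
 */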
1134  static int try_to_free_dmap_chunks(struct fuse_conn_dax *fcd,
1135  				   unsigned long nr_to_free)
1136  {
1137  	struct fuse_dax_mapping *dmap, *pos, *temp;
1138  	int ret, nr_freed = 0;
1139  	unsigned long start_idx = 0, end_idx = 0;
1140  	struct inode *inode = NULL;
1141  
1142  	/* Pick the first busy range and free it for now */
1143  	while (1) {
1144  		if (nr_freed >= nr_to_free)
1145  			break;
1146  
1147  		dmap = NULL;
1148  		spin_lock(&fcd->lock);
1149  
1150  		if (!fcd->nr_busy_ranges) {
1151  			spin_unlock(&fcd->lock);
1152  			return 0;
1153  		}
1154  
1155  		list_for_each_entry_safe(pos, temp, &fcd->busy_ranges,
1156  						busy_list) {
1157  			/* skip this range if it's in use. */
1158  			if (refcount_read(&pos->refcnt) > 1)
1159  				continue;
1160  
1161  			inode = igrab(pos->inode);
1162  			/*
1163  			 * This inode is going away. That will free
1164  			 * up all the ranges anyway, continue to
1165  			 * next range.
1166  			 */
1167  			if (!inode)
1168  				continue;
1169  			/*
1170  			 * Take this element off the list and add it to the
1171  			 * tail. If this element can't be freed, this will help
1172  			 * with selecting a new element in the next iteration of the loop.
1173  			 */
1174  			dmap = pos;
1175  			list_move_tail(&dmap->busy_list, &fcd->busy_ranges);
1176  			start_idx = end_idx = dmap->itn.start;
1177  			break;
1178  		}
1179  		spin_unlock(&fcd->lock);
1180  		if (!dmap)
1181  			return 0;
1182  
1183  		ret = lookup_and_reclaim_dmap(fcd, inode, start_idx, end_idx);
1184  		iput(inode);
1185  		if (ret)
1186  			return ret;
1187  		nr_freed++;
1188  	}
1189  	return 0;
1190  }
1191  
1192  static void fuse_dax_free_mem_worker(struct work_struct *work)
1193  {
1194  	int ret;
1195  	struct fuse_conn_dax *fcd = container_of(work, struct fuse_conn_dax,
1196  						 free_work.work);
1197  	ret = try_to_free_dmap_chunks(fcd, FUSE_DAX_RECLAIM_CHUNK);
1198  	if (ret) {
1199  		pr_debug("fuse: try_to_free_dmap_chunks() failed with err=%d\n",
1200  			 ret);
1201  	}
1202  
1203  	/* If the number of free ranges is still below the threshold, requeue */
1204  	kick_dmap_free_worker(fcd, 1);
1205  }
1206  
1207  static void fuse_free_dax_mem_ranges(struct list_head *mem_list)
1208  {
1209  	struct fuse_dax_mapping *range, *temp;
1210  
1211  	/* Free all allocated elements */
1212  	list_for_each_entry_safe(range, temp, mem_list, list) {
1213  		list_del(&range->list);
1214  		if (!list_empty(&range->busy_list))
1215  			list_del(&range->busy_list);
1216  		kfree(range);
1217  	}
1218  }
1219  
1220  void fuse_dax_conn_free(struct fuse_conn *fc)
1221  {
1222  	if (fc->dax) {
1223  		fuse_free_dax_mem_ranges(&fc->dax->free_ranges);
1224  		kfree(fc->dax);
1225  		fc->dax = NULL;
1226  	}
1227  }
1228  
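/*
 * Carve the DAX window reported by dax_direct_access() into FUSE_DAX_SZ
 * sized ranges and put them all on the free list.
 */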
1229  static int fuse_dax_mem_range_init(struct fuse_conn_dax *fcd)
1230  {
1231  	long nr_pages, nr_ranges;
1232  	struct fuse_dax_mapping *range;
1233  	int ret, id;
1234  	size_t dax_size = -1;
1235  	unsigned long i;
1236  
1237  	init_waitqueue_head(&fcd->range_waitq);
1238  	INIT_LIST_HEAD(&fcd->free_ranges);
1239  	INIT_LIST_HEAD(&fcd->busy_ranges);
1240  	INIT_DELAYED_WORK(&fcd->free_work, fuse_dax_free_mem_worker);
1241  
1242  	id = dax_read_lock();
1243  	nr_pages = dax_direct_access(fcd->dev, 0, PHYS_PFN(dax_size),
1244  			DAX_ACCESS, NULL, NULL);
1245  	dax_read_unlock(id);
1246  	if (nr_pages < 0) {
1247  		pr_debug("dax_direct_access() returned %ld\n", nr_pages);
1248  		return nr_pages;
1249  	}
1250  
1251  	nr_ranges = nr_pages/FUSE_DAX_PAGES;
1252  	pr_debug("%s: dax mapped %ld pages. nr_ranges=%ld\n",
1253  		__func__, nr_pages, nr_ranges);
1254  
1255  	for (i = 0; i < nr_ranges; i++) {
1256  		range = kzalloc(sizeof(struct fuse_dax_mapping), GFP_KERNEL);
1257  		ret = -ENOMEM;
1258  		if (!range)
1259  			goto out_err;
1260  
1261  		/* TODO: This offset only works if the virtio-fs driver does
1262  		 * not have some memory hidden at the beginning. This needs
1263  		 * better handling.
1264  		 */
1265  		range->window_offset = i * FUSE_DAX_SZ;
1266  		range->length = FUSE_DAX_SZ;
1267  		INIT_LIST_HEAD(&range->busy_list);
1268  		refcount_set(&range->refcnt, 1);
1269  		list_add_tail(&range->list, &fcd->free_ranges);
1270  	}
1271  
1272  	fcd->nr_free_ranges = nr_ranges;
1273  	fcd->nr_ranges = nr_ranges;
1274  	return 0;
1275  out_err:
1276  	/* Free all allocated elements */
1277  	fuse_free_dax_mem_ranges(&fcd->free_ranges);
1278  	return ret;
1279  }
1280  
1281  int fuse_dax_conn_alloc(struct fuse_conn *fc, enum fuse_dax_mode dax_mode,
1282  			struct dax_device *dax_dev)
1283  {
1284  	struct fuse_conn_dax *fcd;
1285  	int err;
1286  
1287  	fc->dax_mode = dax_mode;
1288  
1289  	if (!dax_dev)
1290  		return 0;
1291  
1292  	fcd = kzalloc(sizeof(*fcd), GFP_KERNEL);
1293  	if (!fcd)
1294  		return -ENOMEM;
1295  
1296  	spin_lock_init(&fcd->lock);
1297  	fcd->dev = dax_dev;
1298  	err = fuse_dax_mem_range_init(fcd);
1299  	if (err) {
1300  		kfree(fcd);
1301  		return err;
1302  	}
1303  
1304  	fc->dax = fcd;
1305  	return 0;
1306  }
1307  
1308  bool fuse_dax_inode_alloc(struct super_block *sb, struct fuse_inode *fi)
1309  {
1310  	struct fuse_conn *fc = get_fuse_conn_super(sb);
1311  
1312  	fi->dax = NULL;
1313  	if (fc->dax) {
1314  		fi->dax = kzalloc(sizeof(*fi->dax), GFP_KERNEL_ACCOUNT);
1315  		if (!fi->dax)
1316  			return false;
1317  
1318  		init_rwsem(&fi->dax->sem);
1319  		fi->dax->tree = RB_ROOT_CACHED;
1320  	}
1321  
1322  	return true;
1323  }
1324  
1325  static const struct address_space_operations fuse_dax_file_aops  = {
1326  	.writepages	= fuse_dax_writepages,
1327  	.direct_IO	= noop_direct_IO,
1328  	.dirty_folio	= noop_dirty_folio,
1329  };
1330  
1331  static bool fuse_should_enable_dax(struct inode *inode, unsigned int flags)
1332  {
1333  	struct fuse_conn *fc = get_fuse_conn(inode);
1334  	enum fuse_dax_mode dax_mode = fc->dax_mode;
1335  
1336  	if (dax_mode == FUSE_DAX_NEVER)
1337  		return false;
1338  
1339  	/*
1340  	 * fc->dax may be NULL in 'inode' mode when the filesystem device doesn't
1341  	 * support DAX, in which case it will silently fall back to 'never' mode.
1342  	 */
1343  	if (!fc->dax)
1344  		return false;
1345  
1346  	if (dax_mode == FUSE_DAX_ALWAYS)
1347  		return true;
1348  
1349  	/* dax_mode is FUSE_DAX_INODE* */
1350  	return fc->inode_dax && (flags & FUSE_ATTR_DAX);
1351  }
1352  
1353  void fuse_dax_inode_init(struct inode *inode, unsigned int flags)
1354  {
1355  	if (!fuse_should_enable_dax(inode, flags))
1356  		return;
1357  
1358  	inode->i_flags |= S_DAX;
1359  	inode->i_data.a_ops = &fuse_dax_file_aops;
1360  }
1361  
1362  void fuse_dax_dontcache(struct inode *inode, unsigned int flags)
1363  {
1364  	struct fuse_conn *fc = get_fuse_conn(inode);
1365  
1366  	if (fuse_is_inode_dax_mode(fc->dax_mode) &&
1367  	    ((bool) IS_DAX(inode) != (bool) (flags & FUSE_ATTR_DAX)))
1368  		d_mark_dontcache(inode);
1369  }
1370  
1371  bool fuse_dax_check_alignment(struct fuse_conn *fc, unsigned int map_alignment)
1372  {
1373  	if (fc->dax && (map_alignment > FUSE_DAX_SHIFT)) {
1374  		pr_warn("FUSE: map_alignment %u incompatible with dax mem range size %u\n",
1375  			map_alignment, FUSE_DAX_SZ);
1376  		return false;
1377  	}
1378  	return true;
1379  }
1380  
1381  void fuse_dax_cancel_work(struct fuse_conn *fc)
1382  {
1383  	struct fuse_conn_dax *fcd = fc->dax;
1384  
1385  	if (fcd)
1386  		cancel_delayed_work_sync(&fcd->free_work);
1387  
1388  }
1389  EXPORT_SYMBOL_GPL(fuse_dax_cancel_work);
1390