// SPDX-License-Identifier: GPL-2.0
/*
 * FUSE inode io modes.
 *
 * Copyright (c) 2024 CTERA Networks.
 */

#include "fuse_i.h"

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/file.h>
#include <linux/fs.h>

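/*
 * Summary of the inode io mode state, as implemented below:
 *
 *   fi->iocachectr > 0  - inode is in caching io mode (IOM_CACHED opens)
 *   fi->iocachectr == 0 - no io mode is established on the inode
 *   fi->iocachectr < 0  - inode is in uncached/passthrough io mode
 *                         (IOM_UNCACHED opens)
 *
 * The FUSE_I_CACHE_IO_MODE state bit is set while the inode is in caching
 * mode and while a cached open is waiting for uncached io to complete; it
 * advises new direct-io writes to take an exclusive lock.
 */
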
/*
 * Return true if a new open in caching mode needs to wait for in-progress
 * parallel dio writes to complete.
 */
static inline bool fuse_is_io_cache_wait(struct fuse_inode *fi)
{
	return READ_ONCE(fi->iocachectr) < 0 && !fuse_inode_backing(fi);
}

/*
 * Called on cached file open() and on first mmap() of direct_io file.
 * Takes cached_io inode mode reference to be dropped on file release.
 *
 * Blocks new parallel dio writes and waits for the in-progress parallel dio
 * writes to complete.
 */
int fuse_file_cached_io_open(struct inode *inode, struct fuse_file *ff)
{
	struct fuse_inode *fi = get_fuse_inode(inode);

	/* There are no io modes if the server does not implement open */
	if (!ff->args)
		return 0;

	spin_lock(&fi->lock);
	/*
	 * Setting the bit advises new direct-io writes to use an exclusive
	 * lock - without it the wait below might be forever.
	 */
	while (fuse_is_io_cache_wait(fi)) {
		set_bit(FUSE_I_CACHE_IO_MODE, &fi->state);
		spin_unlock(&fi->lock);
		wait_event(fi->direct_io_waitq, !fuse_is_io_cache_wait(fi));
		spin_lock(&fi->lock);
	}

	/*
	 * Check if inode entered passthrough io mode while waiting for parallel
	 * dio write completion.
	 */
	if (fuse_inode_backing(fi)) {
		clear_bit(FUSE_I_CACHE_IO_MODE, &fi->state);
		spin_unlock(&fi->lock);
		return -ETXTBSY;
	}

	WARN_ON(ff->iomode == IOM_UNCACHED);
	if (ff->iomode == IOM_NONE) {
		ff->iomode = IOM_CACHED;
		if (fi->iocachectr == 0)
			set_bit(FUSE_I_CACHE_IO_MODE, &fi->state);
		fi->iocachectr++;
	}
	spin_unlock(&fi->lock);
	return 0;
}
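
/*
 * The wait loop above pairs with fuse_inode_uncached_io_end(): when the
 * last uncached io mode reference is dropped and fi->iocachectr returns
 * to zero, waiters on fi->direct_io_waitq are woken up.
 */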

static void fuse_file_cached_io_release(struct fuse_file *ff,
					struct fuse_inode *fi)
{
	spin_lock(&fi->lock);
	WARN_ON(fi->iocachectr <= 0);
	WARN_ON(ff->iomode != IOM_CACHED);
	ff->iomode = IOM_NONE;
	fi->iocachectr--;
	if (fi->iocachectr == 0)
		clear_bit(FUSE_I_CACHE_IO_MODE, &fi->state);
	spin_unlock(&fi->lock);
}

/* Start strictly uncached io mode where cache access is not allowed */
int fuse_inode_uncached_io_start(struct fuse_inode *fi, struct fuse_backing *fb)
{
	struct fuse_backing *oldfb;
	int err = 0;

	spin_lock(&fi->lock);
	/* deny conflicting backing files on same fuse inode */
	oldfb = fuse_inode_backing(fi);
	if (fb && oldfb && oldfb != fb) {
		err = -EBUSY;
		goto unlock;
	}
	if (fi->iocachectr > 0) {
		err = -ETXTBSY;
		goto unlock;
	}
	fi->iocachectr--;

	/* fuse inode holds a single refcount of backing file */
	if (fb && !oldfb) {
		oldfb = fuse_inode_backing_set(fi, fb);
		WARN_ON_ONCE(oldfb != NULL);
	} else {
		fuse_backing_put(fb);
	}
unlock:
	spin_unlock(&fi->lock);
	return err;
}
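
/*
 * Note the reference counting contract above: on success, ownership of the
 * caller's fb reference (if any) is transferred to the inode, or dropped if
 * the inode already holds the same backing file; on failure, the reference
 * remains with the caller (see the error path in
 * fuse_file_passthrough_open()).
 */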

/* Takes uncached_io inode mode reference to be dropped on file release */
static int fuse_file_uncached_io_open(struct inode *inode,
				      struct fuse_file *ff,
				      struct fuse_backing *fb)
{
	struct fuse_inode *fi = get_fuse_inode(inode);
	int err;

	err = fuse_inode_uncached_io_start(fi, fb);
	if (err)
		return err;

	WARN_ON(ff->iomode != IOM_NONE);
	ff->iomode = IOM_UNCACHED;
	return 0;
}

void fuse_inode_uncached_io_end(struct fuse_inode *fi)
{
	struct fuse_backing *oldfb = NULL;

	spin_lock(&fi->lock);
	WARN_ON(fi->iocachectr >= 0);
	fi->iocachectr++;
	if (!fi->iocachectr) {
		wake_up(&fi->direct_io_waitq);
		oldfb = fuse_inode_backing_set(fi, NULL);
	}
	spin_unlock(&fi->lock);
	if (oldfb)
		fuse_backing_put(oldfb);
}

/* Drop uncached_io reference from passthrough open */
static void fuse_file_uncached_io_release(struct fuse_file *ff,
					  struct fuse_inode *fi)
{
	WARN_ON(ff->iomode != IOM_UNCACHED);
	ff->iomode = IOM_NONE;
	fuse_inode_uncached_io_end(fi);
}

/*
 * Open flags that are allowed in combination with FOPEN_PASSTHROUGH.
 * A combination of FOPEN_PASSTHROUGH and FOPEN_DIRECT_IO means that read/write
 * operations go directly to the server, but mmap is done on the backing file.
 * FOPEN_PASSTHROUGH mode should not co-exist with any users of the fuse inode
 * page cache, so FOPEN_KEEP_CACHE is a strange and undesired combination.
 */
#define FOPEN_PASSTHROUGH_MASK \
	(FOPEN_PASSTHROUGH | FOPEN_DIRECT_IO | FOPEN_PARALLEL_DIRECT_WRITES | \
	 FOPEN_NOFLUSH)
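
/*
 * For example, a server that wants read/write to go through the server but
 * mmap to be served from the backing file could reply to FUSE_OPEN with
 * FOPEN_PASSTHROUGH | FOPEN_DIRECT_IO and a valid backing id.
 */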

static int fuse_file_passthrough_open(struct inode *inode, struct file *file)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_backing *fb;
	int err;

	/* Check allowed conditions for file open in passthrough mode */
	if (!IS_ENABLED(CONFIG_FUSE_PASSTHROUGH) || !fc->passthrough ||
	    (ff->open_flags & ~FOPEN_PASSTHROUGH_MASK))
		return -EINVAL;

	fb = fuse_passthrough_open(file, inode,
				   ff->args->open_outarg.backing_id);
	if (IS_ERR(fb))
		return PTR_ERR(fb);

	/* First passthrough file open denies caching inode io mode */
	err = fuse_file_uncached_io_open(inode, ff, fb);
	if (!err)
		return 0;

	fuse_passthrough_release(ff, fb);
	fuse_backing_put(fb);

	return err;
}

/* Request access to submit new io to inode via open file */
int fuse_file_io_open(struct file *file, struct inode *inode)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_inode *fi = get_fuse_inode(inode);
	int err;

	/*
	 * io modes are not relevant with DAX and with a server that does not
	 * implement open.
	 */
	if (FUSE_IS_DAX(inode) || !ff->args)
		return 0;

	/*
	 * Server is expected to use FOPEN_PASSTHROUGH for all opens of an inode
	 * which is already open for passthrough.
	 */
	err = -EINVAL;
	if (fuse_inode_backing(fi) && !(ff->open_flags & FOPEN_PASSTHROUGH))
		goto fail;

	/*
	 * FOPEN_PARALLEL_DIRECT_WRITES requires FOPEN_DIRECT_IO.
	 */
	if (!(ff->open_flags & FOPEN_DIRECT_IO))
		ff->open_flags &= ~FOPEN_PARALLEL_DIRECT_WRITES;

	/*
	 * First passthrough file open denies caching inode io mode.
	 * First caching file open enters caching inode io mode.
	 *
	 * Note that if the user opens a file with O_DIRECT, but the server did
	 * not specify FOPEN_DIRECT_IO, a later fcntl() could remove O_DIRECT,
	 * so we put the inode in caching mode to prevent parallel dio.
	 */
	if ((ff->open_flags & FOPEN_DIRECT_IO) &&
	    !(ff->open_flags & FOPEN_PASSTHROUGH))
		return 0;

	if (ff->open_flags & FOPEN_PASSTHROUGH)
		err = fuse_file_passthrough_open(inode, file);
	else
		err = fuse_file_cached_io_open(inode, ff);
	if (err)
		goto fail;

	return 0;

fail:
	pr_debug("failed to open file in requested io mode (open_flags=0x%x, err=%i).\n",
		 ff->open_flags, err);
	/*
	 * The file open mode determines the inode io mode.
	 * Using an incorrect open mode is a server mistake, which results in a
	 * user visible failure of open() with EIO error.
	 */
	return -EIO;
}
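
/*
 * A successful fuse_file_io_open() leaves ff->iomode in one of three states
 * that fuse_file_io_release() below undoes: IOM_UNCACHED for FOPEN_PASSTHROUGH
 * opens, IOM_NONE for plain FOPEN_DIRECT_IO opens (no io mode reference is
 * taken), and IOM_CACHED for all other opens.
 */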

/* No more pending io and no new io possible to inode via open/mmapped file */
void fuse_file_io_release(struct fuse_file *ff, struct inode *inode)
{
	struct fuse_inode *fi = get_fuse_inode(inode);

	/*
	 * Last passthrough file close allows caching inode io mode.
	 * Last caching file close exits caching inode io mode.
	 */
	switch (ff->iomode) {
	case IOM_NONE:
		/* Nothing to do */
		break;
	case IOM_UNCACHED:
		fuse_file_uncached_io_release(ff, fi);
		break;
	case IOM_CACHED:
		fuse_file_cached_io_release(ff, fi);
		break;
	}
}