// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/fsnotify.h>
#include <linux/namei.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "../fs/internal.h"

#include "io_uring.h"
#include "rsrc.h"
#include "openclose.h"

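/*
 * Per-opcode request state for the open, close and fixed-fd-install
 * requests below. Each struct is overlaid on the io_kiocb command area
 * via io_kiocb_to_cmd().
 */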
struct io_open {
	struct file			*file;
	int				dfd;
	u32				file_slot;
	struct filename			*filename;
	struct open_how			how;
	unsigned long			nofile;
};

struct io_close {
	struct file			*file;
	int				fd;
	u32				file_slot;
};

struct io_fixed_install {
	struct file			*file;
	unsigned int			o_flags;
};

static bool io_openat_force_async(struct io_open *open)
{
	/*
	 * Don't bother trying for O_TRUNC, O_CREAT, or O_TMPFILE open,
	 * it'll always -EAGAIN. Note that we test for __O_TMPFILE because
	 * O_TMPFILE includes O_DIRECTORY, which isn't a flag we need to force
	 * async for.
	 */
	return open->how.flags & (O_TRUNC | O_CREAT | __O_TMPFILE);
}

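/*
 * Common prep for openat/openat2: pull the directory fd, user filename and
 * target fixed file slot out of the SQE and resolve the filename. Opening
 * directly into a fixed slot can't be combined with O_CLOEXEC, as the file
 * never enters the regular fd table.
 */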
static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_open *open = io_kiocb_to_cmd(req, struct io_open);
	const char __user *fname;
	int ret;

	if (unlikely(sqe->buf_index))
		return -EINVAL;
	if (unlikely(req->flags & REQ_F_FIXED_FILE))
		return -EBADF;

	/* open.how should be already initialised */
	if (!(open->how.flags & O_PATH) && force_o_largefile())
		open->how.flags |= O_LARGEFILE;

	open->dfd = READ_ONCE(sqe->fd);
	fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
	open->filename = getname(fname);
	if (IS_ERR(open->filename)) {
		ret = PTR_ERR(open->filename);
		open->filename = NULL;
		return ret;
	}

	open->file_slot = READ_ONCE(sqe->file_index);
	if (open->file_slot && (open->how.flags & O_CLOEXEC))
		return -EINVAL;

	open->nofile = rlimit(RLIMIT_NOFILE);
	req->flags |= REQ_F_NEED_CLEANUP;
	if (io_openat_force_async(open))
		req->flags |= REQ_F_FORCE_ASYNC;
	return 0;
}

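/* openat(2) flavour: open_how is built from the SQE's flags and mode */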
int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_open *open = io_kiocb_to_cmd(req, struct io_open);
	u64 mode = READ_ONCE(sqe->len);
	u64 flags = READ_ONCE(sqe->open_flags);

	open->how = build_open_how(flags, mode);
	return __io_openat_prep(req, sqe);
}

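/*
 * openat2(2) flavour: the SQE points at a user struct open_how.
 * copy_struct_from_user() accepts any size from OPEN_HOW_SIZE_VER0 up and
 * rejects non-zero bytes beyond the kernel's notion of the struct.
 */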
int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_open *open = io_kiocb_to_cmd(req, struct io_open);
	struct open_how __user *how;
	size_t len;
	int ret;

	how = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	len = READ_ONCE(sqe->len);
	if (len < OPEN_HOW_SIZE_VER0)
		return -EINVAL;

	ret = copy_struct_from_user(&open->how, sizeof(open->how), how, len);
	if (ret)
		return ret;

	return __io_openat_prep(req, sqe);
}

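/*
 * Issue side for both openat and openat2. A non-blocking attempt restricts
 * the path walk to the dcache (LOOKUP_CACHED) and opens with O_NONBLOCK;
 * -EAGAIN from that punts the request to async context, unless the
 * application itself asked for RESOLVE_CACHED.
 */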
int io_openat2(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_open *open = io_kiocb_to_cmd(req, struct io_open);
	struct open_flags op;
	struct file *file;
	bool resolve_nonblock, nonblock_set;
	bool fixed = !!open->file_slot;
	int ret;

	ret = build_open_flags(&open->how, &op);
	if (ret)
		goto err;
	nonblock_set = op.open_flag & O_NONBLOCK;
	resolve_nonblock = open->how.resolve & RESOLVE_CACHED;
	if (issue_flags & IO_URING_F_NONBLOCK) {
		WARN_ON_ONCE(io_openat_force_async(open));
		op.lookup_flags |= LOOKUP_CACHED;
		op.open_flag |= O_NONBLOCK;
	}

	if (!fixed) {
		ret = __get_unused_fd_flags(open->how.flags, open->nofile);
		if (ret < 0)
			goto err;
	}

	file = do_filp_open(open->dfd, open->filename, &op);
	if (IS_ERR(file)) {
		/*
		 * We could hang on to this 'fd' on retrying, but seems like
		 * marginal gain for something that is now known to be a slower
		 * path. So just put it, and we'll get a new one when we retry.
		 */
		if (!fixed)
			put_unused_fd(ret);

		ret = PTR_ERR(file);
		/* only retry if RESOLVE_CACHED wasn't already set by application */
		if (ret == -EAGAIN &&
		    (!resolve_nonblock && (issue_flags & IO_URING_F_NONBLOCK)))
			return -EAGAIN;
		goto err;
	}

	if ((issue_flags & IO_URING_F_NONBLOCK) && !nonblock_set)
		file->f_flags &= ~O_NONBLOCK;

	if (!fixed)
		fd_install(ret, file);
	else
		ret = io_fixed_fd_install(req, issue_flags, file,
						open->file_slot);
err:
	putname(open->filename);
	req->flags &= ~REQ_F_NEED_CLEANUP;
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

int io_openat(struct io_kiocb *req, unsigned int issue_flags)
{
	return io_openat2(req, issue_flags);
}

void io_open_cleanup(struct io_kiocb *req)
{
	struct io_open *open = io_kiocb_to_cmd(req, struct io_open);

	if (open->filename)
		putname(open->filename);
}

int __io_close_fixed(struct io_ring_ctx *ctx, unsigned int issue_flags,
		     unsigned int offset)
{
	int ret;

	io_ring_submit_lock(ctx, issue_flags);
	ret = io_fixed_fd_remove(ctx, offset);
	io_ring_submit_unlock(ctx, issue_flags);

	return ret;
}

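/* file_slot in the SQE is 1-based, 0 means "not a fixed file" */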
static inline int io_close_fixed(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_close *close = io_kiocb_to_cmd(req, struct io_close);

	return __io_close_fixed(req->ctx, issue_flags, close->file_slot - 1);
}

int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_close *close = io_kiocb_to_cmd(req, struct io_close);

	if (sqe->off || sqe->addr || sqe->len || sqe->rw_flags || sqe->buf_index)
		return -EINVAL;
	if (req->flags & REQ_F_FIXED_FILE)
		return -EBADF;

	close->fd = READ_ONCE(sqe->fd);
	close->file_slot = READ_ONCE(sqe->file_index);
	if (close->file_slot && close->fd)
		return -EINVAL;

	return 0;
}

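/*
 * close(2) for io_uring: a request naming a fixed slot just removes the
 * file from the ring's file table. For a regular fd the close is done
 * inline, unless the file has a ->flush() op and this is a non-blocking
 * attempt, in which case it's punted so flush can block.
 */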
int io_close(struct io_kiocb *req, unsigned int issue_flags)
{
	struct files_struct *files = current->files;
	struct io_close *close = io_kiocb_to_cmd(req, struct io_close);
	struct file *file;
	int ret = -EBADF;

	if (close->file_slot) {
		ret = io_close_fixed(req, issue_flags);
		goto err;
	}

	spin_lock(&files->file_lock);
	file = files_lookup_fd_locked(files, close->fd);
	if (!file || io_is_uring_fops(file)) {
		spin_unlock(&files->file_lock);
		goto err;
	}

	/* if the file has a flush method, be safe and punt to async */
	if (file->f_op->flush && (issue_flags & IO_URING_F_NONBLOCK)) {
		spin_unlock(&files->file_lock);
		return -EAGAIN;
	}

	file = file_close_fd_locked(files, close->fd);
	spin_unlock(&files->file_lock);
	if (!file)
		goto err;

	/* No ->flush() or already async, safely close from here */
	ret = filp_close(file, current->files);
err:
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

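/*
 * Fixed fd install: take the fixed file this request runs on and install
 * it into the task's regular file table. O_CLOEXEC is the default and can
 * be cleared with IORING_FIXED_FD_NO_CLOEXEC; requests carrying personality
 * credentials are rejected so the fd is installed with the task's own creds.
 */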
int io_install_fixed_fd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_fixed_install *ifi;
	unsigned int flags;

	if (sqe->off || sqe->addr || sqe->len || sqe->buf_index ||
	    sqe->splice_fd_in || sqe->addr3)
		return -EINVAL;

	/* must be a fixed file */
	if (!(req->flags & REQ_F_FIXED_FILE))
		return -EBADF;

	flags = READ_ONCE(sqe->install_fd_flags);
	if (flags & ~IORING_FIXED_FD_NO_CLOEXEC)
		return -EINVAL;

	/* ensure the task's creds are used when installing/receiving fds */
	if (req->flags & REQ_F_CREDS)
		return -EPERM;

	/* default to O_CLOEXEC, disable if IORING_FIXED_FD_NO_CLOEXEC is set */
	ifi = io_kiocb_to_cmd(req, struct io_fixed_install);
	ifi->o_flags = O_CLOEXEC;
	if (flags & IORING_FIXED_FD_NO_CLOEXEC)
		ifi->o_flags = 0;

	return 0;
}

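/* receive_fd() installs a new fd pointing at the fixed file and returns it */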
int io_install_fixed_fd(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_fixed_install *ifi;
	int ret;

	ifi = io_kiocb_to_cmd(req, struct io_fixed_install);
	ret = receive_fd(req->file, NULL, ifi->o_flags);
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}