// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "../fs/internal.h"

#include "io_uring.h"
#include "fs.h"
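
/*
 * Per-request state for the io_uring file system operations
 * (IORING_OP_RENAMEAT, IORING_OP_UNLINKAT, IORING_OP_MKDIRAT,
 * IORING_OP_SYMLINKAT and IORING_OP_LINKAT). The prep handlers copy the
 * relevant SQE fields and import the user pathnames into these structs;
 * the cleanup handlers drop the filename references again.
 */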
struct io_rename {
	struct file			*file;
	int				old_dfd;
	int				new_dfd;
	struct filename			*oldpath;
	struct filename			*newpath;
	int				flags;
};

struct io_unlink {
	struct file			*file;
	int				dfd;
	int				flags;
	struct filename			*filename;
};

struct io_mkdir {
	struct file			*file;
	int				dfd;
	umode_t				mode;
	struct filename			*filename;
};

struct io_link {
	struct file			*file;
	int				old_dfd;
	int				new_dfd;
	struct filename			*oldpath;
	struct filename			*newpath;
	int				flags;
};
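
/*
 * Prep for IORING_OP_RENAMEAT: the old/new directory fds come from sqe->fd
 * and sqe->len, the two pathnames from sqe->addr and sqe->addr2. The
 * request is forced async because the rename path may block.
 */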
int io_renameat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_rename *ren = io_kiocb_to_cmd(req, struct io_rename);
	const char __user *oldf, *newf;

	if (sqe->buf_index || sqe->splice_fd_in)
		return -EINVAL;
	if (unlikely(req->flags & REQ_F_FIXED_FILE))
		return -EBADF;

	ren->old_dfd = READ_ONCE(sqe->fd);
	oldf = u64_to_user_ptr(READ_ONCE(sqe->addr));
	newf = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	ren->new_dfd = READ_ONCE(sqe->len);
	ren->flags = READ_ONCE(sqe->rename_flags);

	ren->oldpath = getname(oldf);
	if (IS_ERR(ren->oldpath))
		return PTR_ERR(ren->oldpath);

	ren->newpath = getname(newf);
	if (IS_ERR(ren->newpath)) {
		putname(ren->oldpath);
		return PTR_ERR(ren->newpath);
	}

	req->flags |= REQ_F_NEED_CLEANUP;
	req->flags |= REQ_F_FORCE_ASYNC;
	return 0;
}
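
/*
 * Issue handler: prep marked the request REQ_F_FORCE_ASYNC, so this only
 * ever runs from a blocking (io-wq) context, which the WARN_ON_ONCE below
 * asserts.
 */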
int io_renameat(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_rename *ren = io_kiocb_to_cmd(req, struct io_rename);
	int ret;

	WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);

	ret = do_renameat2(ren->old_dfd, ren->oldpath, ren->new_dfd,
				ren->newpath, ren->flags);

	req->flags &= ~REQ_F_NEED_CLEANUP;
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

void io_renameat_cleanup(struct io_kiocb *req)
{
	struct io_rename *ren = io_kiocb_to_cmd(req, struct io_rename);

	putname(ren->oldpath);
	putname(ren->newpath);
}
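
/*
 * Prep for IORING_OP_UNLINKAT: AT_REMOVEDIR is the only flag accepted in
 * sqe->unlink_flags; at issue time it selects rmdir vs. unlink.
 */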
int io_unlinkat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_unlink *un = io_kiocb_to_cmd(req, struct io_unlink);
	const char __user *fname;

	if (sqe->off || sqe->len || sqe->buf_index || sqe->splice_fd_in)
		return -EINVAL;
	if (unlikely(req->flags & REQ_F_FIXED_FILE))
		return -EBADF;

	un->dfd = READ_ONCE(sqe->fd);

	un->flags = READ_ONCE(sqe->unlink_flags);
	if (un->flags & ~AT_REMOVEDIR)
		return -EINVAL;

	fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
	un->filename = getname(fname);
	if (IS_ERR(un->filename))
		return PTR_ERR(un->filename);

	req->flags |= REQ_F_NEED_CLEANUP;
	req->flags |= REQ_F_FORCE_ASYNC;
	return 0;
}

int io_unlinkat(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_unlink *un = io_kiocb_to_cmd(req, struct io_unlink);
	int ret;

	WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);

	if (un->flags & AT_REMOVEDIR)
		ret = do_rmdir(un->dfd, un->filename);
	else
		ret = do_unlinkat(un->dfd, un->filename);

	req->flags &= ~REQ_F_NEED_CLEANUP;
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

void io_unlinkat_cleanup(struct io_kiocb *req)
{
	struct io_unlink *ul = io_kiocb_to_cmd(req, struct io_unlink);

	putname(ul->filename);
}
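
/*
 * Prep for IORING_OP_MKDIRAT: the directory fd comes from sqe->fd, the
 * mode from sqe->len and the pathname from sqe->addr.
 */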
int io_mkdirat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_mkdir *mkd = io_kiocb_to_cmd(req, struct io_mkdir);
	const char __user *fname;

	if (sqe->off || sqe->rw_flags || sqe->buf_index || sqe->splice_fd_in)
		return -EINVAL;
	if (unlikely(req->flags & REQ_F_FIXED_FILE))
		return -EBADF;

	mkd->dfd = READ_ONCE(sqe->fd);
	mkd->mode = READ_ONCE(sqe->len);

	fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
	mkd->filename = getname(fname);
	if (IS_ERR(mkd->filename))
		return PTR_ERR(mkd->filename);

	req->flags |= REQ_F_NEED_CLEANUP;
	req->flags |= REQ_F_FORCE_ASYNC;
	return 0;
}

int io_mkdirat(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_mkdir *mkd = io_kiocb_to_cmd(req, struct io_mkdir);
	int ret;

	WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);

	ret = do_mkdirat(mkd->dfd, mkd->filename, mkd->mode);

	req->flags &= ~REQ_F_NEED_CLEANUP;
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

void io_mkdirat_cleanup(struct io_kiocb *req)
{
	struct io_mkdir *md = io_kiocb_to_cmd(req, struct io_mkdir);

	putname(md->filename);
}
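
/*
 * Prep for IORING_OP_SYMLINKAT: reuses struct io_link. sqe->fd is the
 * directory fd for the new link name; sqe->addr is the symlink target and
 * sqe->addr2 the link path.
 */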
int io_symlinkat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_link *sl = io_kiocb_to_cmd(req, struct io_link);
	const char __user *oldpath, *newpath;

	if (sqe->len || sqe->rw_flags || sqe->buf_index || sqe->splice_fd_in)
		return -EINVAL;
	if (unlikely(req->flags & REQ_F_FIXED_FILE))
		return -EBADF;

	sl->new_dfd = READ_ONCE(sqe->fd);
	oldpath = u64_to_user_ptr(READ_ONCE(sqe->addr));
	newpath = u64_to_user_ptr(READ_ONCE(sqe->addr2));

	sl->oldpath = getname(oldpath);
	if (IS_ERR(sl->oldpath))
		return PTR_ERR(sl->oldpath);

	sl->newpath = getname(newpath);
	if (IS_ERR(sl->newpath)) {
		putname(sl->oldpath);
		return PTR_ERR(sl->newpath);
	}

	req->flags |= REQ_F_NEED_CLEANUP;
	req->flags |= REQ_F_FORCE_ASYNC;
	return 0;
}

int io_symlinkat(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_link *sl = io_kiocb_to_cmd(req, struct io_link);
	int ret;

	WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);

	ret = do_symlinkat(sl->oldpath, sl->new_dfd, sl->newpath);

	req->flags &= ~REQ_F_NEED_CLEANUP;
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}
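
/*
 * Prep for IORING_OP_LINKAT: getname_uflags() is used for the old path so
 * that AT_EMPTY_PATH in sqe->hardlink_flags is honored.
 */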
int io_linkat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_link *lnk = io_kiocb_to_cmd(req, struct io_link);
	const char __user *oldf, *newf;

	if (sqe->buf_index || sqe->splice_fd_in)
		return -EINVAL;
	if (unlikely(req->flags & REQ_F_FIXED_FILE))
		return -EBADF;

	lnk->old_dfd = READ_ONCE(sqe->fd);
	lnk->new_dfd = READ_ONCE(sqe->len);
	oldf = u64_to_user_ptr(READ_ONCE(sqe->addr));
	newf = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	lnk->flags = READ_ONCE(sqe->hardlink_flags);

	lnk->oldpath = getname_uflags(oldf, lnk->flags);
	if (IS_ERR(lnk->oldpath))
		return PTR_ERR(lnk->oldpath);

	lnk->newpath = getname(newf);
	if (IS_ERR(lnk->newpath)) {
		putname(lnk->oldpath);
		return PTR_ERR(lnk->newpath);
	}

	req->flags |= REQ_F_NEED_CLEANUP;
	req->flags |= REQ_F_FORCE_ASYNC;
	return 0;
}

int io_linkat(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_link *lnk = io_kiocb_to_cmd(req, struct io_link);
	int ret;

	WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);

	ret = do_linkat(lnk->old_dfd, lnk->oldpath, lnk->new_dfd,
				lnk->newpath, lnk->flags);

	req->flags &= ~REQ_F_NEED_CLEANUP;
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}
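
/*
 * Both the symlink and hardlink requests keep their names in struct
 * io_link, so a single cleanup handler can drop them for either opcode.
 */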
void io_link_cleanup(struct io_kiocb *req)
{
	struct io_link *sl = io_kiocb_to_cmd(req, struct io_link);

	putname(sl->oldpath);
	putname(sl->newpath);
}