// SPDX-License-Identifier: LGPL-2.1
/*
 *
 *   vfs operations that deal with files
 *
 *   Copyright (C) International Business Machines  Corp., 2002,2010
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *              Jeremy Allison (jra@samba.org)
 *
 */
#include <linux/fs.h>
#include <linux/filelock.h>
#include <linux/backing-dev.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/delay.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/mm.h>
#include <asm/div64.h>
#include "cifsfs.h"
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "smb2proto.h"
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
#include "fscache.h"
#include "smbdirect.h"
#include "fs_context.h"
#include "cifs_ioctl.h"
#include "cached_dir.h"
#include <trace/events/netfs.h>

static int cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush);

/*
 * Prepare a subrequest to upload to the server.  We need to allocate credits
 * so that we know the maximum amount of data that we can include in it.
 */
static void cifs_prepare_write(struct netfs_io_subrequest *subreq)
{
	struct cifs_io_subrequest *wdata =
		container_of(subreq, struct cifs_io_subrequest, subreq);
	struct cifs_io_request *req = wdata->req;
	struct netfs_io_stream *stream = &req->rreq.io_streams[subreq->stream_nr];
	struct TCP_Server_Info *server;
	struct cifsFileInfo *open_file = req->cfile;
	size_t wsize = req->rreq.wsize;
	int rc;

	if (!wdata->have_xid) {
		wdata->xid = get_xid();
		wdata->have_xid = true;
	}

	server = cifs_pick_channel(tlink_tcon(open_file->tlink)->ses);
	wdata->server = server;

retry:
	if (open_file->invalidHandle) {
		rc = cifs_reopen_file(open_file, false);
		if (rc < 0) {
			if (rc == -EAGAIN)
				goto retry;
			subreq->error = rc;
			return netfs_prepare_write_failed(subreq);
		}
	}

	rc = server->ops->wait_mtu_credits(server, wsize, &stream->sreq_max_len,
					   &wdata->credits);
	if (rc < 0) {
		subreq->error = rc;
		return netfs_prepare_write_failed(subreq);
	}

	wdata->credits.rreq_debug_id = subreq->rreq->debug_id;
	wdata->credits.rreq_debug_index = subreq->debug_index;
	wdata->credits.in_flight_check = 1;
	trace_smb3_rw_credits(wdata->rreq->debug_id,
			      wdata->subreq.debug_index,
			      wdata->credits.value,
			      server->credits, server->in_flight,
			      wdata->credits.value,
			      cifs_trace_rw_credits_write_prepare);

#ifdef CONFIG_CIFS_SMB_DIRECT
	if (server->smbd_conn)
		stream->sreq_max_segs = server->smbd_conn->max_frmr_depth;
#endif
}

/*
 * Issue a subrequest to upload to the server.
 */
static void cifs_issue_write(struct netfs_io_subrequest *subreq)
{
	struct cifs_io_subrequest *wdata =
		container_of(subreq, struct cifs_io_subrequest, subreq);
	struct cifs_sb_info *sbi = CIFS_SB(subreq->rreq->inode->i_sb);
	int rc;

	if (cifs_forced_shutdown(sbi)) {
		rc = -EIO;
		goto fail;
	}

	rc = adjust_credits(wdata->server, wdata, cifs_trace_rw_credits_issue_write_adjust);
	if (rc)
		goto fail;

	rc = -EAGAIN;
	if (wdata->req->cfile->invalidHandle)
		goto fail;

	wdata->server->ops->async_writev(wdata);
out:
	return;

fail:
	if (rc == -EAGAIN)
		trace_netfs_sreq(subreq, netfs_sreq_trace_retry);
	else
		trace_netfs_sreq(subreq, netfs_sreq_trace_fail);
	add_credits_and_wake_if(wdata->server, &wdata->credits, 0);
	cifs_write_subrequest_terminated(wdata, rc, false);
	goto out;
}

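/*
 * Invalidate the locally cached data for an inode on behalf of the netfs
 * library (a thin wrapper around cifs_invalidate_cache()).
 */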
static void cifs_netfs_invalidate_cache(struct netfs_io_request *wreq)
{
	cifs_invalidate_cache(wreq->inode, 0);
}

/*
 * Negotiate the size of a read operation on behalf of the netfs library.
 */
static int cifs_prepare_read(struct netfs_io_subrequest *subreq)
{
	struct netfs_io_request *rreq = subreq->rreq;
	struct cifs_io_subrequest *rdata = container_of(subreq, struct cifs_io_subrequest, subreq);
	struct cifs_io_request *req = container_of(subreq->rreq, struct cifs_io_request, rreq);
	struct TCP_Server_Info *server = req->server;
	struct cifs_sb_info *cifs_sb = CIFS_SB(rreq->inode->i_sb);
	size_t size;
	int rc = 0;

	if (!rdata->have_xid) {
		rdata->xid = get_xid();
		rdata->have_xid = true;
	}
	rdata->server = server;

	if (cifs_sb->ctx->rsize == 0)
		cifs_sb->ctx->rsize =
			server->ops->negotiate_rsize(tlink_tcon(req->cfile->tlink),
						     cifs_sb->ctx);

	rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->rsize,
					   &size, &rdata->credits);
	if (rc)
		return rc;

	rreq->io_streams[0].sreq_max_len = size;

	rdata->credits.in_flight_check = 1;
	rdata->credits.rreq_debug_id = rreq->debug_id;
	rdata->credits.rreq_debug_index = subreq->debug_index;

	trace_smb3_rw_credits(rdata->rreq->debug_id,
			      rdata->subreq.debug_index,
			      rdata->credits.value,
			      server->credits, server->in_flight, 0,
			      cifs_trace_rw_credits_read_submit);

#ifdef CONFIG_CIFS_SMB_DIRECT
	if (server->smbd_conn)
		rreq->io_streams[0].sreq_max_segs = server->smbd_conn->max_frmr_depth;
#endif
	return 0;
}

/*
 * Issue a read operation on behalf of the netfs helper functions.  We're asked
 * to make a read of a certain size at a point in the file.  We are permitted
 * to only read a portion of that, but as long as we read something, the netfs
 * helper will call us again so that we can issue another read.
 */
static void cifs_issue_read(struct netfs_io_subrequest *subreq)
{
	struct netfs_io_request *rreq = subreq->rreq;
	struct cifs_io_subrequest *rdata = container_of(subreq, struct cifs_io_subrequest, subreq);
	struct cifs_io_request *req = container_of(subreq->rreq, struct cifs_io_request, rreq);
	struct TCP_Server_Info *server = req->server;
	int rc = 0;

	cifs_dbg(FYI, "%s: op=%08x[%x] mapping=%p len=%zu/%zu\n",
		 __func__, rreq->debug_id, subreq->debug_index, rreq->mapping,
		 subreq->transferred, subreq->len);

	rc = adjust_credits(server, rdata, cifs_trace_rw_credits_issue_read_adjust);
	if (rc)
		goto failed;

	if (req->cfile->invalidHandle) {
		do {
			rc = cifs_reopen_file(req->cfile, true);
		} while (rc == -EAGAIN);
		if (rc)
			goto failed;
	}

	if (subreq->rreq->origin != NETFS_DIO_READ)
		__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);

	trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
	rc = rdata->server->ops->async_readv(rdata);
	if (rc)
		goto failed;
	return;

failed:
	netfs_read_subreq_terminated(subreq, rc, false);
}

/*
 * Writeback calls this when it finds a folio that needs uploading.  This isn't
 * called if writeback only has copy-to-cache to deal with.
 */
static void cifs_begin_writeback(struct netfs_io_request *wreq)
{
	struct cifs_io_request *req = container_of(wreq, struct cifs_io_request, rreq);
	int ret;

	ret = cifs_get_writable_file(CIFS_I(wreq->inode), FIND_WR_ANY, &req->cfile);
	if (ret) {
		cifs_dbg(VFS, "No writable handle in writepages ret=%d\n", ret);
		return;
	}

	wreq->io_streams[0].avail = true;
}

/*
 * Initialise a request.
 */
static int cifs_init_request(struct netfs_io_request *rreq, struct file *file)
{
	struct cifs_io_request *req = container_of(rreq, struct cifs_io_request, rreq);
	struct cifs_sb_info *cifs_sb = CIFS_SB(rreq->inode->i_sb);
	struct cifsFileInfo *open_file = NULL;

	rreq->rsize = cifs_sb->ctx->rsize;
	rreq->wsize = cifs_sb->ctx->wsize;
	req->pid = current->tgid; /* may be a workqueue thread, not the original caller */

	if (file) {
		open_file = file->private_data;
		rreq->netfs_priv = file->private_data;
		req->cfile = cifsFileInfo_get(open_file);
		req->server = cifs_pick_channel(tlink_tcon(req->cfile->tlink)->ses);
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
			req->pid = req->cfile->pid;
	} else if (rreq->origin != NETFS_WRITEBACK) {
		WARN_ON_ONCE(1);
		return -EIO;
	}

	return 0;
}

/*
 * Completion of a request operation.
 */
static void cifs_rreq_done(struct netfs_io_request *rreq)
{
	struct timespec64 atime, mtime;
	struct inode *inode = rreq->inode;

	/* we do not want atime to be less than mtime, it broke some apps */
	atime = inode_set_atime_to_ts(inode, current_time(inode));
	mtime = inode_get_mtime(inode);
	if (timespec64_compare(&atime, &mtime) < 0)
		inode_set_atime_to_ts(inode, inode_get_mtime(inode));
}

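/*
 * Free a request: drop the reference that the request holds on the open file.
 */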
static void cifs_free_request(struct netfs_io_request *rreq)
{
	struct cifs_io_request *req = container_of(rreq, struct cifs_io_request, rreq);

	if (req->cfile)
		cifsFileInfo_put(req->cfile);
}

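/*
 * Free a subrequest: deregister any RDMA memory registration, return unused
 * credits to the server and release the xid, if we took one.
 */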
static void cifs_free_subrequest(struct netfs_io_subrequest *subreq)
{
	struct cifs_io_subrequest *rdata =
		container_of(subreq, struct cifs_io_subrequest, subreq);
	int rc = subreq->error;

	if (rdata->subreq.source == NETFS_DOWNLOAD_FROM_SERVER) {
#ifdef CONFIG_CIFS_SMB_DIRECT
		if (rdata->mr) {
			smbd_deregister_mr(rdata->mr);
			rdata->mr = NULL;
		}
#endif
	}

	if (rdata->credits.value != 0) {
		trace_smb3_rw_credits(rdata->rreq->debug_id,
				      rdata->subreq.debug_index,
				      rdata->credits.value,
				      rdata->server ? rdata->server->credits : 0,
				      rdata->server ? rdata->server->in_flight : 0,
				      -rdata->credits.value,
				      cifs_trace_rw_credits_free_subreq);
		if (rdata->server)
			add_credits_and_wake_if(rdata->server, &rdata->credits, 0);
		else
			rdata->credits.value = 0;
	}

	if (rdata->have_xid)
		free_xid(rdata->xid);
}

const struct netfs_request_ops cifs_req_ops = {
	.request_pool		= &cifs_io_request_pool,
	.subrequest_pool	= &cifs_io_subrequest_pool,
	.init_request		= cifs_init_request,
	.free_request		= cifs_free_request,
	.free_subrequest	= cifs_free_subrequest,
	.prepare_read		= cifs_prepare_read,
	.issue_read		= cifs_issue_read,
	.done			= cifs_rreq_done,
	.begin_writeback	= cifs_begin_writeback,
	.prepare_write		= cifs_prepare_write,
	.issue_write		= cifs_issue_write,
	.invalidate_cache	= cifs_netfs_invalidate_cache,
};

/*
 * Mark all open files on the tree connection as invalid, since they were
 * closed when the session to the server was lost.
 */
void
cifs_mark_open_files_invalid(struct cifs_tcon *tcon)
{
	struct cifsFileInfo *open_file = NULL;
	struct list_head *tmp;
	struct list_head *tmp1;

	/* only send once per connect */
	spin_lock(&tcon->tc_lock);
	if (tcon->need_reconnect)
		tcon->status = TID_NEED_RECON;

	if (tcon->status != TID_NEED_RECON) {
		spin_unlock(&tcon->tc_lock);
		return;
	}
	tcon->status = TID_IN_FILES_INVALIDATE;
	spin_unlock(&tcon->tc_lock);

	/* list all files open on tree connection and mark them invalid */
	spin_lock(&tcon->open_file_lock);
	list_for_each_safe(tmp, tmp1, &tcon->openFileList) {
		open_file = list_entry(tmp, struct cifsFileInfo, tlist);
		open_file->invalidHandle = true;
		open_file->oplock_break_cancelled = true;
	}
	spin_unlock(&tcon->open_file_lock);

	invalidate_all_cached_dirs(tcon);
	spin_lock(&tcon->tc_lock);
	if (tcon->status == TID_IN_FILES_INVALIDATE)
		tcon->status = TID_NEED_TCON;
	spin_unlock(&tcon->tc_lock);

	/*
	 * BB Add call to invalidate_inodes(sb) for all superblocks mounted
	 * to this tcon.
	 */
}

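/*
 * Map POSIX open flags to an SMB desired-access mask.  When rdwr_for_fscache
 * is 1, a write-only open also requests read access so that the local cache
 * can fill in around partial writes.
 */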
static inline int cifs_convert_flags(unsigned int flags, int rdwr_for_fscache)
{
	if ((flags & O_ACCMODE) == O_RDONLY)
		return GENERIC_READ;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		return rdwr_for_fscache == 1 ? (GENERIC_READ | GENERIC_WRITE) : GENERIC_WRITE;
	else if ((flags & O_ACCMODE) == O_RDWR) {
		/* GENERIC_ALL is too much permission to request; it can cause
		   unnecessary access-denied errors on create */
		/* return GENERIC_ALL; */
		return (GENERIC_READ | GENERIC_WRITE);
	}

	return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
		FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
		FILE_READ_DATA);
}

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
static u32 cifs_posix_convert_flags(unsigned int flags)
{
	u32 posix_flags = 0;

	if ((flags & O_ACCMODE) == O_RDONLY)
		posix_flags = SMB_O_RDONLY;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		posix_flags = SMB_O_WRONLY;
	else if ((flags & O_ACCMODE) == O_RDWR)
		posix_flags = SMB_O_RDWR;

	if (flags & O_CREAT) {
		posix_flags |= SMB_O_CREAT;
		if (flags & O_EXCL)
			posix_flags |= SMB_O_EXCL;
	} else if (flags & O_EXCL)
		cifs_dbg(FYI, "Application %s pid %d has incorrectly set O_EXCL flag but not O_CREAT on file open. Ignoring O_EXCL\n",
			 current->comm, current->tgid);

	if (flags & O_TRUNC)
		posix_flags |= SMB_O_TRUNC;
	/* be safe and imply O_SYNC for O_DSYNC */
	if (flags & O_DSYNC)
		posix_flags |= SMB_O_SYNC;
	if (flags & O_DIRECTORY)
		posix_flags |= SMB_O_DIRECTORY;
	if (flags & O_NOFOLLOW)
		posix_flags |= SMB_O_NOFOLLOW;
	if (flags & O_DIRECT)
		posix_flags |= SMB_O_DIRECT;

	return posix_flags;
}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

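/*
 * Map O_CREAT/O_EXCL/O_TRUNC combinations to an SMB create disposition (see
 * the open flag mapping table in cifs_nt_open() below).
 */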
static inline int cifs_get_disposition(unsigned int flags)
{
	if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
		return FILE_CREATE;
	else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
		return FILE_OVERWRITE_IF;
	else if ((flags & O_CREAT) == O_CREAT)
		return FILE_OPEN_IF;
	else if ((flags & O_TRUNC) == O_TRUNC)
		return FILE_OVERWRITE;
	else
		return FILE_OPEN;
}

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
int cifs_posix_open(const char *full_path, struct inode **pinode,
			struct super_block *sb, int mode, unsigned int f_flags,
			__u32 *poplock, __u16 *pnetfid, unsigned int xid)
{
	int rc;
	FILE_UNIX_BASIC_INFO *presp_data;
	__u32 posix_flags = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fattr fattr;
	struct tcon_link *tlink;
	struct cifs_tcon *tcon;

	cifs_dbg(FYI, "posix open %s\n", full_path);

	presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
	if (presp_data == NULL)
		return -ENOMEM;

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		rc = PTR_ERR(tlink);
		goto posix_open_ret;
	}

	tcon = tlink_tcon(tlink);
	mode &= ~current_umask();

	posix_flags = cifs_posix_convert_flags(f_flags);
	rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
			     poplock, full_path, cifs_sb->local_nls,
			     cifs_remap(cifs_sb));
	cifs_put_tlink(tlink);

	if (rc)
		goto posix_open_ret;

	if (presp_data->Type == cpu_to_le32(-1))
		goto posix_open_ret; /* open ok, caller does qpathinfo */

	if (!pinode)
		goto posix_open_ret; /* caller does not need info */

	cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);

	/* get new inode and set it up */
	if (*pinode == NULL) {
		cifs_fill_uniqueid(sb, &fattr);
		*pinode = cifs_iget(sb, &fattr);
		if (!*pinode) {
			rc = -ENOMEM;
			goto posix_open_ret;
		}
	} else {
		cifs_revalidate_mapping(*pinode);
		rc = cifs_fattr_to_inode(*pinode, &fattr, false);
	}

posix_open_ret:
	kfree(presp_data);
	return rc;
}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

static int cifs_nt_open(const char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
			struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
			struct cifs_fid *fid, unsigned int xid, struct cifs_open_info_data *buf)
{
	int rc;
	int desired_access;
	int disposition;
	int create_options = CREATE_NOT_DIR;
	struct TCP_Server_Info *server = tcon->ses->server;
	struct cifs_open_parms oparms;
	int rdwr_for_fscache = 0;

	if (!server->ops->open)
		return -ENOSYS;

	/* If we're caching, we need to be able to fill in around partial writes. */
	if (cifs_fscache_enabled(inode) && (f_flags & O_ACCMODE) == O_WRONLY)
		rdwr_for_fscache = 1;

	desired_access = cifs_convert_flags(f_flags, rdwr_for_fscache);

/*********************************************************************
 *  open flag mapping table:
 *
 *	POSIX Flag            CIFS Disposition
 *	----------            ----------------
 *	O_CREAT               FILE_OPEN_IF
 *	O_CREAT | O_EXCL      FILE_CREATE
 *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
 *	O_TRUNC               FILE_OVERWRITE
 *	none of the above     FILE_OPEN
 *
 *	Note that there is no direct POSIX match for the FILE_SUPERSEDE
 *	disposition (ie create whether or not the file exists).
 *	O_CREAT | O_TRUNC is similar, but it truncates the existing file
 *	rather than creating a new one with the attributes/metadata
 *	passed in on the open call, as FILE_SUPERSEDE does.
 *
 *	O_SYNC is a reasonable match to the CIFS writethrough flag, and
 *	the read/write flags match reasonably.  O_LARGEFILE is irrelevant
 *	because largefile support is always used by this client.  Flags
 *	O_APPEND, O_DIRECT, O_DIRECTORY, O_FASYNC, O_NOFOLLOW and
 *	O_NONBLOCK need further investigation.
 *********************************************************************/

	disposition = cifs_get_disposition(f_flags);

	/* BB pass O_SYNC flag through on file attributes .. BB */

	/* O_SYNC also has bit for O_DSYNC so following check picks up either */
	if (f_flags & O_SYNC)
		create_options |= CREATE_WRITE_THROUGH;

	if (f_flags & O_DIRECT)
		create_options |= CREATE_NO_BUFFER;

retry_open:
	oparms = (struct cifs_open_parms) {
		.tcon = tcon,
		.cifs_sb = cifs_sb,
		.desired_access = desired_access,
		.create_options = cifs_create_options(cifs_sb, create_options),
		.disposition = disposition,
		.path = full_path,
		.fid = fid,
	};

	rc = server->ops->open(xid, &oparms, oplock, buf);
	if (rc) {
		if (rc == -EACCES && rdwr_for_fscache == 1) {
			desired_access = cifs_convert_flags(f_flags, 0);
			rdwr_for_fscache = 2;
			goto retry_open;
		}
		return rc;
	}
	if (rdwr_for_fscache == 2)
		cifs_invalidate_cache(inode, FSCACHE_INVAL_DIO_WRITE);

	/* TODO: Add support for calling posix query info but with passing in fid */
	if (tcon->unix_ext)
		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
					      xid);
	else
		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
					 xid, fid);

	if (rc) {
		server->ops->close(xid, tcon, fid);
		if (rc == -ESTALE)
			rc = -EOPENSTALE;
	}

	return rc;
}

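/*
 * Return true if any open fid on this inode currently holds cached
 * byte-range (mandatory-style) locks.
 */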
static bool
cifs_has_mand_locks(struct cifsInodeInfo *cinode)
{
	struct cifs_fid_locks *cur;
	bool has_locks = false;

	down_read(&cinode->lock_sem);
	list_for_each_entry(cur, &cinode->llist, llist) {
		if (!list_empty(&cur->locks)) {
			has_locks = true;
			break;
		}
	}
	up_read(&cinode->lock_sem);
	return has_locks;
}

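/*
 * Take lock_sem for write by spinning on down_write_trylock() with a short
 * sleep rather than blocking in down_write().  Presumably this sidesteps
 * lock-ordering problems with paths that hold lock_sem for read while
 * blocking (an assumption inferred from the trylock/msleep pattern, not
 * verified against the original commit).
 */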
void
cifs_down_write(struct rw_semaphore *sem)
{
	while (!down_write_trylock(sem))
		msleep(10);
}

static void cifsFileInfo_put_work(struct work_struct *work);
void serverclose_work(struct work_struct *work);

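/*
 * Allocate and initialise the per-open private data (cifsFileInfo) for a
 * newly opened file: take refs on the dentry and tcon link, hook the open
 * into the tcon and inode lists, and apply any oplock/lease that was
 * granted.  Returns NULL on allocation failure.
 */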
struct cifsFileInfo *cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
				       struct tcon_link *tlink, __u32 oplock,
				       const char *symlink_target)
{
	struct dentry *dentry = file_dentry(file);
	struct inode *inode = d_inode(dentry);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifsFileInfo *cfile;
	struct cifs_fid_locks *fdlocks;
	struct cifs_tcon *tcon = tlink_tcon(tlink);
	struct TCP_Server_Info *server = tcon->ses->server;

	cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
	if (cfile == NULL)
		return cfile;

	fdlocks = kzalloc(sizeof(struct cifs_fid_locks), GFP_KERNEL);
	if (!fdlocks) {
		kfree(cfile);
		return NULL;
	}

	if (symlink_target) {
		cfile->symlink_target = kstrdup(symlink_target, GFP_KERNEL);
		if (!cfile->symlink_target) {
			kfree(fdlocks);
			kfree(cfile);
			return NULL;
		}
	}

	INIT_LIST_HEAD(&fdlocks->locks);
	fdlocks->cfile = cfile;
	cfile->llist = fdlocks;

	cfile->count = 1;
	cfile->pid = current->tgid;
	cfile->uid = current_fsuid();
	cfile->dentry = dget(dentry);
	cfile->f_flags = file->f_flags;
	cfile->invalidHandle = false;
	cfile->deferred_close_scheduled = false;
	cfile->tlink = cifs_get_tlink(tlink);
	INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
	INIT_WORK(&cfile->put, cifsFileInfo_put_work);
	INIT_WORK(&cfile->serverclose, serverclose_work);
	INIT_DELAYED_WORK(&cfile->deferred, smb2_deferred_work_close);
	mutex_init(&cfile->fh_mutex);
	spin_lock_init(&cfile->file_info_lock);

	cifs_sb_active(inode->i_sb);

	/*
	 * If the server returned a read oplock and we have mandatory brlocks,
	 * set oplock level to None.
	 */
	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
		oplock = 0;
	}

	cifs_down_write(&cinode->lock_sem);
	list_add(&fdlocks->llist, &cinode->llist);
	up_write(&cinode->lock_sem);

	spin_lock(&tcon->open_file_lock);
	if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE && oplock)
		oplock = fid->pending_open->oplock;
	list_del(&fid->pending_open->olist);

	fid->purge_cache = false;
	server->ops->set_fid(cfile, fid, oplock);

	list_add(&cfile->tlist, &tcon->openFileList);
	atomic_inc(&tcon->num_local_opens);

	/* if this is a readable file instance, put it first in the list */
	spin_lock(&cinode->open_file_lock);
	if (file->f_mode & FMODE_READ)
		list_add(&cfile->flist, &cinode->openFileList);
	else
		list_add_tail(&cfile->flist, &cinode->openFileList);
	spin_unlock(&cinode->open_file_lock);
	spin_unlock(&tcon->open_file_lock);

	if (fid->purge_cache)
		cifs_zap_mapping(inode);

	file->private_data = cfile;
	return cfile;
}

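/*
 * Take an extra reference on an open file's private data; must be balanced
 * by a later cifsFileInfo_put().
 */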
struct cifsFileInfo *
cifsFileInfo_get(struct cifsFileInfo *cifs_file)
{
	spin_lock(&cifs_file->file_info_lock);
	cifsFileInfo_get_locked(cifs_file);
	spin_unlock(&cifs_file->file_info_lock);
	return cifs_file;
}

static void cifsFileInfo_put_final(struct cifsFileInfo *cifs_file)
{
	struct inode *inode = d_inode(cifs_file->dentry);
	struct cifsInodeInfo *cifsi = CIFS_I(inode);
	struct cifsLockInfo *li, *tmp;
	struct super_block *sb = inode->i_sb;

	/*
	 * Delete any outstanding lock records. We'll lose them when the file
	 * is closed anyway.
	 */
	cifs_down_write(&cifsi->lock_sem);
	list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
		list_del(&li->llist);
		cifs_del_lock_waiters(li);
		kfree(li);
	}
	list_del(&cifs_file->llist->llist);
	kfree(cifs_file->llist);
	up_write(&cifsi->lock_sem);

	cifs_put_tlink(cifs_file->tlink);
	dput(cifs_file->dentry);
	cifs_sb_deactive(sb);
	kfree(cifs_file->symlink_target);
	kfree(cifs_file);
}

static void cifsFileInfo_put_work(struct work_struct *work)
{
	struct cifsFileInfo *cifs_file = container_of(work,
			struct cifsFileInfo, put);

	cifsFileInfo_put_final(cifs_file);
}

void serverclose_work(struct work_struct *work)
{
	struct cifsFileInfo *cifs_file = container_of(work,
			struct cifsFileInfo, serverclose);
	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	int rc = 0;
	int retries = 0;
	int MAX_RETRIES = 4;

	do {
		if (server->ops->close_getattr)
			rc = server->ops->close_getattr(0, tcon, cifs_file);
		else if (server->ops->close)
			rc = server->ops->close(0, tcon, &cifs_file->fid);

		if (rc == -EBUSY || rc == -EAGAIN) {
			retries++;
			msleep(250);
		}
	} while ((rc == -EBUSY || rc == -EAGAIN) && (retries < MAX_RETRIES));

	if (retries == MAX_RETRIES)
		pr_warn("Serverclose failed %d times, giving up\n", MAX_RETRIES);

	if (cifs_file->offload)
		queue_work(fileinfo_put_wq, &cifs_file->put);
	else
		cifsFileInfo_put_final(cifs_file);
}

/**
 * cifsFileInfo_put - release a reference to a file's private data
 *
 * Always potentially waits for the oplock handler. See _cifsFileInfo_put().
 *
 * @cifs_file:	cifs/smb3 specific info (eg refcounts) for an open file
 */
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
	_cifsFileInfo_put(cifs_file, true, true);
}

/**
 * _cifsFileInfo_put - release a reference to a file's private data
 *
 * This may involve closing the filehandle @cifs_file out on the
 * server. Must be called without holding tcon->open_file_lock,
 * cinode->open_file_lock and cifs_file->file_info_lock.
 *
 * If @wait_oplock_handler is true and we are releasing the last
 * reference, wait for any running oplock break handler of the file
 * and cancel any pending one.
 *
 * @cifs_file:	cifs/smb3 specific info (eg refcounts) for an open file
 * @wait_oplock_handler: must be false if called from oplock_break_handler
 * @offload:	offload the final release to a workqueue (not done for
 *		close and oplock breaks)
 *
 */
void _cifsFileInfo_put(struct cifsFileInfo *cifs_file,
		       bool wait_oplock_handler, bool offload)
{
	struct inode *inode = d_inode(cifs_file->dentry);
	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	struct cifsInodeInfo *cifsi = CIFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fid fid = {};
	struct cifs_pending_open open;
	bool oplock_break_cancelled;
	bool serverclose_offloaded = false;

	spin_lock(&tcon->open_file_lock);
	spin_lock(&cifsi->open_file_lock);
	spin_lock(&cifs_file->file_info_lock);

	cifs_file->offload = offload;
	if (--cifs_file->count > 0) {
		spin_unlock(&cifs_file->file_info_lock);
		spin_unlock(&cifsi->open_file_lock);
		spin_unlock(&tcon->open_file_lock);
		return;
	}
	spin_unlock(&cifs_file->file_info_lock);

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	/* store open in pending opens to make sure we don't miss lease break */
	cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);

	/* remove it from the lists */
	list_del(&cifs_file->flist);
	list_del(&cifs_file->tlist);
	atomic_dec(&tcon->num_local_opens);

	if (list_empty(&cifsi->openFileList)) {
		cifs_dbg(FYI, "closing last open instance for inode %p\n",
			 d_inode(cifs_file->dentry));
		/*
		 * In strict cache mode we need to invalidate the mapping on
		 * the last close because it may cause an error when we open
		 * this file again and get at least a level II oplock.
		 */
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
			set_bit(CIFS_INO_INVALID_MAPPING, &cifsi->flags);
		cifs_set_oplock_level(cifsi, 0);
	}

	spin_unlock(&cifsi->open_file_lock);
	spin_unlock(&tcon->open_file_lock);

	oplock_break_cancelled = wait_oplock_handler ?
		cancel_work_sync(&cifs_file->oplock_break) : false;

	if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
		struct TCP_Server_Info *server = tcon->ses->server;
		unsigned int xid;
		int rc = 0;

		xid = get_xid();
		if (server->ops->close_getattr)
			rc = server->ops->close_getattr(xid, tcon, cifs_file);
		else if (server->ops->close)
			rc = server->ops->close(xid, tcon, &cifs_file->fid);
		_free_xid(xid);

		if (rc == -EBUSY || rc == -EAGAIN) {
			// Server close failed, hence offloading it as an async op
			queue_work(serverclose_wq, &cifs_file->serverclose);
			serverclose_offloaded = true;
		}
	}

	if (oplock_break_cancelled)
		cifs_done_oplock_break(cifsi);

	cifs_del_pending_open(&open);

	// If serverclose has been offloaded to a workqueue (on failure), it
	// will handle offloading the put as well. If serverclose was not
	// offloaded, we need to handle offloading the put here.
	if (!serverclose_offloaded) {
		if (offload)
			queue_work(fileinfo_put_wq, &cifs_file->put);
		else
			cifsFileInfo_put_final(cifs_file);
	}
}

int cifs_open(struct inode *inode, struct file *file)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct TCP_Server_Info *server;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink;
	struct cifsFileInfo *cfile = NULL;
	void *page;
	const char *full_path;
	bool posix_open_ok = false;
	struct cifs_fid fid = {};
	struct cifs_pending_open open;
	struct cifs_open_info_data data = {};

	xid = get_xid();

	cifs_sb = CIFS_SB(inode->i_sb);
	if (unlikely(cifs_forced_shutdown(cifs_sb))) {
		free_xid(xid);
		return -EIO;
	}

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		free_xid(xid);
		return PTR_ERR(tlink);
	}
	tcon = tlink_tcon(tlink);
	server = tcon->ses->server;

	page = alloc_dentry_path();
	full_path = build_path_from_dentry(file_dentry(file), page);
	if (IS_ERR(full_path)) {
		rc = PTR_ERR(full_path);
		goto out;
	}

	cifs_dbg(FYI, "inode = 0x%p file flags are 0x%x for %s\n",
		 inode, file->f_flags, full_path);

	if (file->f_flags & O_DIRECT &&
	    cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) {
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
			file->f_op = &cifs_file_direct_nobrl_ops;
		else
			file->f_op = &cifs_file_direct_ops;
	}

	/* Get the cached handle as SMB2 close is deferred */
	rc = cifs_get_readable_path(tcon, full_path, &cfile);
	if (rc == 0) {
		if (file->f_flags == cfile->f_flags) {
			file->private_data = cfile;
			spin_lock(&CIFS_I(inode)->deferred_lock);
			cifs_del_deferred_close(cfile);
			spin_unlock(&CIFS_I(inode)->deferred_lock);
			goto use_cache;
		} else {
			_cifsFileInfo_put(cfile, true, false);
		}
	}

	if (server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	if (!tcon->broken_posix_open && tcon->unix_ext &&
	    cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/* can not refresh inode info since size could be stale */
		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
				cifs_sb->ctx->file_mode /* ignored */,
				file->f_flags, &oplock, &fid.netfid, xid);
		if (rc == 0) {
			cifs_dbg(FYI, "posix open succeeded\n");
			posix_open_ok = true;
		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
			if (tcon->ses->serverNOS)
				cifs_dbg(VFS, "server %s of type %s returned unexpected error on SMB posix open, disabling posix open support. Check if server update available.\n",
					 tcon->ses->ip_addr,
					 tcon->ses->serverNOS);
			tcon->broken_posix_open = true;
		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
			 (rc != -EOPNOTSUPP)) /* path not found or net err */
			goto out;
		/*
		 * Else fallthrough to retry open the old way on network i/o
		 * or DFS errors.
		 */
	}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	cifs_add_pending_open(&fid, tlink, &open);

	if (!posix_open_ok) {
		if (server->ops->get_lease_key)
			server->ops->get_lease_key(inode, &fid);

		rc = cifs_nt_open(full_path, inode, cifs_sb, tcon, file->f_flags, &oplock, &fid,
				  xid, &data);
		if (rc) {
			cifs_del_pending_open(&open);
			goto out;
		}
	}

	cfile = cifs_new_fileinfo(&fid, file, tlink, oplock, data.symlink_target);
	if (cfile == NULL) {
		if (server->ops->close)
			server->ops->close(xid, tcon, &fid);
		cifs_del_pending_open(&open);
		rc = -ENOMEM;
		goto out;
	}

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
		/*
		 * Time to set mode which we can not set earlier due to
		 * problems creating new read-only files.
		 */
		struct cifs_unix_set_info_args args = {
			.mode	= inode->i_mode,
			.uid	= INVALID_UID, /* no change */
			.gid	= INVALID_GID, /* no change */
			.ctime	= NO_CHANGE_64,
			.atime	= NO_CHANGE_64,
			.mtime	= NO_CHANGE_64,
			.device	= 0,
		};
		CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
				       cfile->pid);
	}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

use_cache:
	fscache_use_cookie(cifs_inode_cookie(file_inode(file)),
			   file->f_mode & FMODE_WRITE);
	if (!(file->f_flags & O_DIRECT))
		goto out;
	if ((file->f_flags & (O_ACCMODE | O_APPEND)) == O_RDONLY)
		goto out;
	cifs_invalidate_cache(file_inode(file), FSCACHE_INVAL_DIO_WRITE);

out:
	free_dentry_path(page);
	free_xid(xid);
	cifs_put_tlink(tlink);
	cifs_free_open_info(&data);
	return rc;
}

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
static int cifs_push_posix_locks(struct cifsFileInfo *cfile);
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

/*
 * Try to reacquire byte range locks that were released when session
 * to server was lost.
 */
static int
cifs_relock_file(struct cifsFileInfo *cfile)
{
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = 0;
#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

	down_read_nested(&cinode->lock_sem, SINGLE_DEPTH_NESTING);
	if (cinode->can_cache_brlcks) {
		/* can cache locks - no need to relock */
		up_read(&cinode->lock_sem);
		return rc;
	}

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		rc = cifs_push_posix_locks(cfile);
	else
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
		rc = tcon->ses->server->ops->push_mand_locks(cfile);

	up_read(&cinode->lock_sem);
	return rc;
}

static int
cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsInodeInfo *cinode;
	struct inode *inode;
	void *page;
	const char *full_path;
	int desired_access;
	int disposition = FILE_OPEN;
	int create_options = CREATE_NOT_DIR;
	struct cifs_open_parms oparms;
	int rdwr_for_fscache = 0;

	xid = get_xid();
	mutex_lock(&cfile->fh_mutex);
	if (!cfile->invalidHandle) {
		mutex_unlock(&cfile->fh_mutex);
		free_xid(xid);
		return 0;
	}

	inode = d_inode(cfile->dentry);
	cifs_sb = CIFS_SB(inode->i_sb);
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	/*
	 * We cannot take the rename sem here, because various ops (including
	 * some that already hold it) can end up causing writepage to be
	 * called, and if the server was down, that means we end up here. We
	 * can never tell whether the caller already holds rename_sem.
	 */
	page = alloc_dentry_path();
	full_path = build_path_from_dentry(cfile->dentry, page);
	if (IS_ERR(full_path)) {
		mutex_unlock(&cfile->fh_mutex);
		free_dentry_path(page);
		free_xid(xid);
		return PTR_ERR(full_path);
	}

	cifs_dbg(FYI, "inode = 0x%p file flags 0x%x for %s\n",
		 inode, cfile->f_flags, full_path);

	if (tcon->ses->server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	if (tcon->unix_ext && cap_unix(tcon->ses) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/*
		 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
		 * original open. Must mask them off for a reopen.
		 */
		unsigned int oflags = cfile->f_flags &
						~(O_CREAT | O_EXCL | O_TRUNC);

		rc = cifs_posix_open(full_path, NULL, inode->i_sb,
				     cifs_sb->ctx->file_mode /* ignored */,
				     oflags, &oplock, &cfile->fid.netfid, xid);
		if (rc == 0) {
			cifs_dbg(FYI, "posix reopen succeeded\n");
			oparms.reconnect = true;
			goto reopen_success;
		}
		/*
		 * fallthrough to retry open the old way on errors, especially
		 * in the reconnect path it is important to retry hard
		 */
	}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

	/* If we're caching, we need to be able to fill in around partial writes. */
	if (cifs_fscache_enabled(inode) && (cfile->f_flags & O_ACCMODE) == O_WRONLY)
		rdwr_for_fscache = 1;

	desired_access = cifs_convert_flags(cfile->f_flags, rdwr_for_fscache);

	/* O_SYNC also has bit for O_DSYNC so following check picks up either */
	if (cfile->f_flags & O_SYNC)
		create_options |= CREATE_WRITE_THROUGH;

	if (cfile->f_flags & O_DIRECT)
		create_options |= CREATE_NO_BUFFER;

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &cfile->fid);

retry_open:
	oparms = (struct cifs_open_parms) {
		.tcon = tcon,
		.cifs_sb = cifs_sb,
		.desired_access = desired_access,
		.create_options = cifs_create_options(cifs_sb, create_options),
		.disposition = disposition,
		.path = full_path,
		.fid = &cfile->fid,
		.reconnect = true,
	};

	/*
	 * We cannot refresh the inode by passing in a file_info buf to be
	 * returned by ops->open and then calling get_inode_info with the
	 * returned buf, since the file might have write-behind data that
	 * needs to be flushed and the server's version of the file size can
	 * be stale. If we knew for sure that the inode was not dirty locally
	 * we could do this.
	 */
	rc = server->ops->open(xid, &oparms, &oplock, NULL);
	if (rc == -ENOENT && oparms.reconnect == false) {
		/* durable handle timeout is expired - open the file again */
		rc = server->ops->open(xid, &oparms, &oplock, NULL);
		/* indicate that we need to relock the file */
		oparms.reconnect = true;
	}
	if (rc == -EACCES && rdwr_for_fscache == 1) {
		desired_access = cifs_convert_flags(cfile->f_flags, 0);
		rdwr_for_fscache = 2;
		goto retry_open;
	}

	if (rc) {
		mutex_unlock(&cfile->fh_mutex);
		cifs_dbg(FYI, "cifs_reopen returned 0x%x\n", rc);
		cifs_dbg(FYI, "oplock: %d\n", oplock);
		goto reopen_error_exit;
	}

	if (rdwr_for_fscache == 2)
		cifs_invalidate_cache(inode, FSCACHE_INVAL_DIO_WRITE);

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
reopen_success:
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
	cfile->invalidHandle = false;
	mutex_unlock(&cfile->fh_mutex);
	cinode = CIFS_I(inode);

	if (can_flush) {
		rc = filemap_write_and_wait(inode->i_mapping);
		if (!is_interrupt_error(rc))
			mapping_set_error(inode->i_mapping, rc);

		if (tcon->posix_extensions) {
			rc = smb311_posix_get_inode_info(&inode, full_path,
							 NULL, inode->i_sb, xid);
		} else if (tcon->unix_ext) {
			rc = cifs_get_inode_info_unix(&inode, full_path,
						      inode->i_sb, xid);
		} else {
			rc = cifs_get_inode_info(&inode, full_path, NULL,
						 inode->i_sb, xid, NULL);
		}
	}
	/*
	 * Else we are already writing out data to the server and could
	 * deadlock if we tried to flush data. Since we do not know whether
	 * we have data that would invalidate the current end of file on the
	 * server, we cannot go to the server to get the new inode info.
	 */

	/*
	 * If the server returned a read oplock and we have mandatory brlocks,
	 * set oplock level to None.
	 */
	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
		oplock = 0;
	}

	server->ops->set_fid(cfile, &cfile->fid, oplock);
	if (oparms.reconnect)
		cifs_relock_file(cfile);

reopen_error_exit:
	free_dentry_path(page);
	free_xid(xid);
	return rc;
}

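/*
 * Work item that performs the final close of a deferred-close handle once
 * the close timeout has expired.
 */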
void smb2_deferred_work_close(struct work_struct *work)
{
	struct cifsFileInfo *cfile = container_of(work,
			struct cifsFileInfo, deferred.work);

	spin_lock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
	cifs_del_deferred_close(cfile);
	cfile->deferred_close_scheduled = false;
	spin_unlock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
	_cifsFileInfo_put(cfile, true, false);
}

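/*
 * Decide whether a close may be deferred: deferral requires a close timeout,
 * a granted lease with read (or read/write) handle caching, and that no lock
 * was taken on the file (CIFS_INO_CLOSE_ON_LOCK).
 */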
static bool
smb2_can_defer_close(struct inode *inode, struct cifs_deferred_close *dclose)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsInodeInfo *cinode = CIFS_I(inode);

	return (cifs_sb->ctx->closetimeo && cinode->lease_granted && dclose &&
			(cinode->oplock == CIFS_CACHE_RHW_FLG ||
			 cinode->oplock == CIFS_CACHE_RH_FLG) &&
			!test_bit(CIFS_INO_CLOSE_ON_LOCK, &cinode->flags));
}

int cifs_close(struct inode *inode, struct file *file)
{
	struct cifsFileInfo *cfile;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifs_deferred_close *dclose;

	cifs_fscache_unuse_inode_cookie(inode, file->f_mode & FMODE_WRITE);

	if (file->private_data != NULL) {
		cfile = file->private_data;
		file->private_data = NULL;
		dclose = kmalloc(sizeof(struct cifs_deferred_close), GFP_KERNEL);
		if ((cfile->status_file_deleted == false) &&
		    (smb2_can_defer_close(inode, dclose))) {
			if (test_and_clear_bit(NETFS_ICTX_MODIFIED_ATTR, &cinode->netfs.flags)) {
				inode_set_mtime_to_ts(inode,
						      inode_set_ctime_current(inode));
			}
			spin_lock(&cinode->deferred_lock);
			cifs_add_deferred_close(cfile, dclose);
			if (cfile->deferred_close_scheduled &&
			    delayed_work_pending(&cfile->deferred)) {
				/*
				 * If there is no pending work, mod_delayed_work queues new
				 * work, so increase the ref count to avoid use-after-free.
				 */
				if (!mod_delayed_work(deferredclose_wq,
						&cfile->deferred, cifs_sb->ctx->closetimeo))
					cifsFileInfo_get(cfile);
			} else {
				/* Deferred close for files */
				queue_delayed_work(deferredclose_wq,
						&cfile->deferred, cifs_sb->ctx->closetimeo);
				cfile->deferred_close_scheduled = true;
				spin_unlock(&cinode->deferred_lock);
				return 0;
			}
			spin_unlock(&cinode->deferred_lock);
			_cifsFileInfo_put(cfile, true, false);
		} else {
			_cifsFileInfo_put(cfile, true, false);
			kfree(dclose);
		}
	}

	/* return code from the ->release op is always ignored */
	return 0;
}

void
cifs_reopen_persistent_handles(struct cifs_tcon *tcon)
{
	struct cifsFileInfo *open_file, *tmp;
	LIST_HEAD(tmp_list);

	if (!tcon->use_persistent || !tcon->need_reopen_files)
		return;

	tcon->need_reopen_files = false;

	cifs_dbg(FYI, "Reopen persistent handles\n");

	/* list all files open on tree connection, reopen resilient handles  */
	spin_lock(&tcon->open_file_lock);
	list_for_each_entry(open_file, &tcon->openFileList, tlist) {
		if (!open_file->invalidHandle)
			continue;
		cifsFileInfo_get(open_file);
		list_add_tail(&open_file->rlist, &tmp_list);
	}
	spin_unlock(&tcon->open_file_lock);

	list_for_each_entry_safe(open_file, tmp, &tmp_list, rlist) {
		if (cifs_reopen_file(open_file, false /* do not flush */))
			tcon->need_reopen_files = true;
		list_del_init(&open_file->rlist);
		cifsFileInfo_put(open_file);
	}
}

int cifs_closedir(struct inode *inode, struct file *file)
{
	int rc = 0;
	unsigned int xid;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	char *buf;

	cifs_dbg(FYI, "Closedir inode = 0x%p\n", inode);

	if (cfile == NULL)
		return rc;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	cifs_dbg(FYI, "Freeing private data in close dir\n");
	spin_lock(&cfile->file_info_lock);
	if (server->ops->dir_needs_close(cfile)) {
		cfile->invalidHandle = true;
		spin_unlock(&cfile->file_info_lock);
		if (server->ops->close_dir)
			rc = server->ops->close_dir(xid, tcon, &cfile->fid);
		else
			rc = -ENOSYS;
		cifs_dbg(FYI, "Closing uncompleted readdir with rc %d\n", rc);
		/* not much we can do if it fails anyway, ignore rc */
		rc = 0;
	} else
		spin_unlock(&cfile->file_info_lock);

	buf = cfile->srch_inf.ntwrk_buf_start;
	if (buf) {
		cifs_dbg(FYI, "closedir free smb buf in srch struct\n");
		cfile->srch_inf.ntwrk_buf_start = NULL;
		if (cfile->srch_inf.smallBuf)
			cifs_small_buf_release(buf);
		else
			cifs_buf_release(buf);
	}

	cifs_put_tlink(cfile->tlink);
	kfree(file->private_data);
	file->private_data = NULL;
	/* BB can we lock the filestruct while this is going on? */
	free_xid(xid);
	return rc;
}

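/*
 * Allocate and initialise a byte-range lock record for the given range,
 * owned by the current thread group.  Returns NULL on allocation failure.
 */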
static struct cifsLockInfo *
cifs_lock_init(__u64 offset, __u64 length, __u8 type, __u16 flags)
{
	struct cifsLockInfo *lock =
		kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
	if (!lock)
		return lock;
	lock->offset = offset;
	lock->length = length;
	lock->type = type;
	lock->pid = current->tgid;
	lock->flags = flags;
	INIT_LIST_HEAD(&lock->blist);
	init_waitqueue_head(&lock->block_q);
	return lock;
}

void
cifs_del_lock_waiters(struct cifsLockInfo *lock)
{
	struct cifsLockInfo *li, *tmp;
	list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
		list_del_init(&li->blist);
		wake_up(&li->block_q);
	}
}

#define CIFS_LOCK_OP	0
#define CIFS_READ_OP	1
#define CIFS_WRITE_OP	2

/* @rw_check : 0 - no op, 1 - read, 2 - write */
static bool
cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
			    __u64 length, __u8 type, __u16 flags,
			    struct cifsFileInfo *cfile,
			    struct cifsLockInfo **conf_lock, int rw_check)
{
	struct cifsLockInfo *li;
	struct cifsFileInfo *cur_cfile = fdlocks->cfile;
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;

	list_for_each_entry(li, &fdlocks->locks, llist) {
		if (offset + length <= li->offset ||
		    offset >= li->offset + li->length)
			continue;
		if (rw_check != CIFS_LOCK_OP && current->tgid == li->pid &&
		    server->ops->compare_fids(cfile, cur_cfile)) {
			/* shared lock prevents write op through the same fid */
			if (!(li->type & server->vals->shared_lock_type) ||
			    rw_check != CIFS_WRITE_OP)
				continue;
		}
		if ((type & server->vals->shared_lock_type) &&
		    ((server->ops->compare_fids(cfile, cur_cfile) &&
		     current->tgid == li->pid) || type == li->type))
			continue;
		if (rw_check == CIFS_LOCK_OP &&
		    (flags & FL_OFDLCK) && (li->flags & FL_OFDLCK) &&
		    server->ops->compare_fids(cfile, cur_cfile))
			continue;
		if (conf_lock)
			*conf_lock = li;
		return true;
	}
	return false;
}

bool
cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
			__u8 type, __u16 flags,
			struct cifsLockInfo **conf_lock, int rw_check)
{
	bool rc = false;
	struct cifs_fid_locks *cur;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));

	list_for_each_entry(cur, &cinode->llist, llist) {
		rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
						 flags, cfile, conf_lock,
						 rw_check);
		if (rc)
			break;
	}

	return rc;
}

/*
 * Check if there is another lock that prevents us from setting the lock
 * (mandatory style). If such a lock exists, update the flock structure with
 * its properties. Otherwise, set the flock type to F_UNLCK if we can cache
 * brlocks, or leave it the same if we can't. Returns 0 if we don't need to
 * request to the server or 1 otherwise.
 */
static int
cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
	       __u8 type, struct file_lock *flock)
{
	int rc = 0;
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
	bool exist;

	down_read(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, offset, length, type,
					flock->c.flc_flags, &conf_lock,
					CIFS_LOCK_OP);
	if (exist) {
		flock->fl_start = conf_lock->offset;
		flock->fl_end = conf_lock->offset + conf_lock->length - 1;
		flock->c.flc_pid = conf_lock->pid;
		if (conf_lock->type & server->vals->shared_lock_type)
			flock->c.flc_type = F_RDLCK;
		else
			flock->c.flc_type = F_WRLCK;
	} else if (!cinode->can_cache_brlcks)
		rc = 1;
	else
		flock->c.flc_type = F_UNLCK;

	up_read(&cinode->lock_sem);
	return rc;
}

static void
cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
{
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	cifs_down_write(&cinode->lock_sem);
	list_add_tail(&lock->llist, &cfile->llist->locks);
	up_write(&cinode->lock_sem);
}
1619 
1620 /*
1621  * Set the byte-range lock (mandatory style). Returns:
1622  * 1) 0, if we set the lock and don't need to request to the server;
1623  * 2) 1, if no locks prevent us but we need to request to the server;
1624  * 3) -EACCES, if there is a lock that prevents us and wait is false.
1625  */
1626 static int
cifs_lock_add_if(struct cifsFileInfo * cfile,struct cifsLockInfo * lock,bool wait)1627 cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
1628 		 bool wait)
1629 {
1630 	struct cifsLockInfo *conf_lock;
1631 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1632 	bool exist;
1633 	int rc = 0;
1634 
1635 try_again:
1636 	exist = false;
1637 	cifs_down_write(&cinode->lock_sem);
1638 
1639 	exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
1640 					lock->type, lock->flags, &conf_lock,
1641 					CIFS_LOCK_OP);
1642 	if (!exist && cinode->can_cache_brlcks) {
1643 		list_add_tail(&lock->llist, &cfile->llist->locks);
1644 		up_write(&cinode->lock_sem);
1645 		return rc;
1646 	}
1647 
1648 	if (!exist)
1649 		rc = 1;
1650 	else if (!wait)
1651 		rc = -EACCES;
1652 	else {
1653 		list_add_tail(&lock->blist, &conf_lock->blist);
1654 		up_write(&cinode->lock_sem);
1655 		rc = wait_event_interruptible(lock->block_q,
1656 					(lock->blist.prev == &lock->blist) &&
1657 					(lock->blist.next == &lock->blist));
1658 		if (!rc)
1659 			goto try_again;
1660 		cifs_down_write(&cinode->lock_sem);
1661 		list_del_init(&lock->blist);
1662 	}
1663 
1664 	up_write(&cinode->lock_sem);
1665 	return rc;
1666 }
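
/*
 * Consuming the tri-state result above (sketch; this is the shape used
 * by cifs_setlk() later in this file): a negative return (-EACCES)
 * means a conflicting lock exists and the caller frees @lock, 0 means
 * the lock was cached locally, and 1 means it still has to be sent to
 * the server.
 *
 *	rc = cifs_lock_add_if(cfile, lock, wait_flag);
 *	if (rc < 0) {
 *		kfree(lock);
 *		return rc;
 *	}
 *	if (!rc)
 *		goto out;
 */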
1667 
1668 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1669 /*
1670  * Check if there is another lock that prevents us from setting the lock
1671  * (posix style). If such a lock exists, update the flock structure with
1672  * its properties. Otherwise, set the flock type to F_UNLCK if we can cache
1673  * brlocks or leave it the same if we can't. Returns 0 if we don't need to
1674  * ask the server or 1 otherwise.
1675  */
1676 static int
1677 cifs_posix_lock_test(struct file *file, struct file_lock *flock)
1678 {
1679 	int rc = 0;
1680 	struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
1681 	unsigned char saved_type = flock->c.flc_type;
1682 
1683 	if ((flock->c.flc_flags & FL_POSIX) == 0)
1684 		return 1;
1685 
1686 	down_read(&cinode->lock_sem);
1687 	posix_test_lock(file, flock);
1688 
1689 	if (lock_is_unlock(flock) && !cinode->can_cache_brlcks) {
1690 		flock->c.flc_type = saved_type;
1691 		rc = 1;
1692 	}
1693 
1694 	up_read(&cinode->lock_sem);
1695 	return rc;
1696 }
1697 
1698 /*
1699  * Set the byte-range lock (posix style). Returns:
1700  * 1) <0, if an error occurs while setting the lock;
1701  * 2) 0, if we set the lock and don't need to request to the server;
1702  * 3) FILE_LOCK_DEFERRED, if we will wait for some other file_lock;
1703  * 4) FILE_LOCK_DEFERRED + 1, if we need to request to the server.
1704  */
1705 static int
1706 cifs_posix_lock_set(struct file *file, struct file_lock *flock)
1707 {
1708 	struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
1709 	int rc = FILE_LOCK_DEFERRED + 1;
1710 
1711 	if ((flock->c.flc_flags & FL_POSIX) == 0)
1712 		return rc;
1713 
1714 	cifs_down_write(&cinode->lock_sem);
1715 	if (!cinode->can_cache_brlcks) {
1716 		up_write(&cinode->lock_sem);
1717 		return rc;
1718 	}
1719 
1720 	rc = posix_lock_file(file, flock, NULL);
1721 	up_write(&cinode->lock_sem);
1722 	return rc;
1723 }
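
/*
 * Example caller (sketch, matching the use in cifs_setlk() below): any
 * result up to and including FILE_LOCK_DEFERRED (errors included) is
 * final for the caller, while FILE_LOCK_DEFERRED + 1 means the server
 * must still be asked.
 *
 *	rc = cifs_posix_lock_set(file, flock);
 *	if (rc <= FILE_LOCK_DEFERRED)
 *		return rc;
 */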
1724 
1725 int
1726 cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
1727 {
1728 	unsigned int xid;
1729 	int rc = 0, stored_rc;
1730 	struct cifsLockInfo *li, *tmp;
1731 	struct cifs_tcon *tcon;
1732 	unsigned int num, max_num, max_buf;
1733 	LOCKING_ANDX_RANGE *buf, *cur;
1734 	static const int types[] = {
1735 		LOCKING_ANDX_LARGE_FILES,
1736 		LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
1737 	};
1738 	int i;
1739 
1740 	xid = get_xid();
1741 	tcon = tlink_tcon(cfile->tlink);
1742 
1743 	/*
1744 	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
1745 	 * and check it before using.
1746 	 */
1747 	max_buf = tcon->ses->server->maxBuf;
1748 	if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE))) {
1749 		free_xid(xid);
1750 		return -EINVAL;
1751 	}
1752 
1753 	BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
1754 		     PAGE_SIZE);
1755 	max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
1756 			PAGE_SIZE);
1757 	max_num = (max_buf - sizeof(struct smb_hdr)) /
1758 						sizeof(LOCKING_ANDX_RANGE);
1759 	buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
1760 	if (!buf) {
1761 		free_xid(xid);
1762 		return -ENOMEM;
1763 	}
1764 
1765 	for (i = 0; i < 2; i++) {
1766 		cur = buf;
1767 		num = 0;
1768 		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
1769 			if (li->type != types[i])
1770 				continue;
1771 			cur->Pid = cpu_to_le16(li->pid);
1772 			cur->LengthLow = cpu_to_le32((u32)li->length);
1773 			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
1774 			cur->OffsetLow = cpu_to_le32((u32)li->offset);
1775 			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
1776 			if (++num == max_num) {
1777 				stored_rc = cifs_lockv(xid, tcon,
1778 						       cfile->fid.netfid,
1779 						       (__u8)li->type, 0, num,
1780 						       buf);
1781 				if (stored_rc)
1782 					rc = stored_rc;
1783 				cur = buf;
1784 				num = 0;
1785 			} else
1786 				cur++;
1787 		}
1788 
1789 		if (num) {
1790 			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
1791 					       (__u8)types[i], 0, num, buf);
1792 			if (stored_rc)
1793 				rc = stored_rc;
1794 		}
1795 	}
1796 
1797 	kfree(buf);
1798 	free_xid(xid);
1799 	return rc;
1800 }
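
/*
 * Worked example for the sizing above (the figures are assumptions for
 * illustration only; real sizes depend on the protocol structures):
 * with max_buf = 4096, PAGE_SIZE = 4096, a 32-byte smb_hdr and a
 * 20-byte LOCKING_ANDX_RANGE, max_buf is first clamped to
 * min(4096 - 32, 4096) = 4064 and max_num = (4064 - 32) / 20 = 201
 * ranges can be coalesced into one LOCKING_ANDX request before the
 * buffer is flushed via cifs_lockv().
 */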
1801 
1802 static __u32
1803 hash_lockowner(fl_owner_t owner)
1804 {
1805 	return cifs_lock_secret ^ hash32_ptr((const void *)owner);
1806 }
1807 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1808 
1809 struct lock_to_push {
1810 	struct list_head llist;
1811 	__u64 offset;
1812 	__u64 length;
1813 	__u32 pid;
1814 	__u16 netfid;
1815 	__u8 type;
1816 };
1817 
1818 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1819 static int
1820 cifs_push_posix_locks(struct cifsFileInfo *cfile)
1821 {
1822 	struct inode *inode = d_inode(cfile->dentry);
1823 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1824 	struct file_lock *flock;
1825 	struct file_lock_context *flctx = locks_inode_context(inode);
1826 	unsigned int count = 0, i;
1827 	int rc = 0, xid, type;
1828 	struct list_head locks_to_send, *el;
1829 	struct lock_to_push *lck, *tmp;
1830 	__u64 length;
1831 
1832 	xid = get_xid();
1833 
1834 	if (!flctx)
1835 		goto out;
1836 
1837 	spin_lock(&flctx->flc_lock);
1838 	list_for_each(el, &flctx->flc_posix) {
1839 		count++;
1840 	}
1841 	spin_unlock(&flctx->flc_lock);
1842 
1843 	INIT_LIST_HEAD(&locks_to_send);
1844 
1845 	/*
1846 	 * Allocating count locks is enough because no FL_POSIX locks can be
1847 	 * added to the list while we are holding cinode->lock_sem, which
1848 	 * protects the locking operations of this inode.
1849 	 */
1850 	for (i = 0; i < count; i++) {
1851 		lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
1852 		if (!lck) {
1853 			rc = -ENOMEM;
1854 			goto err_out;
1855 		}
1856 		list_add_tail(&lck->llist, &locks_to_send);
1857 	}
1858 
1859 	el = locks_to_send.next;
1860 	spin_lock(&flctx->flc_lock);
1861 	for_each_file_lock(flock, &flctx->flc_posix) {
1862 		unsigned char ftype = flock->c.flc_type;
1863 
1864 		if (el == &locks_to_send) {
1865 			/*
1866 			 * The list ended. We don't have enough allocated
1867 			 * structures - something is really wrong.
1868 			 */
1869 			cifs_dbg(VFS, "Can't push all brlocks!\n");
1870 			break;
1871 		}
1872 		length = cifs_flock_len(flock);
1873 		if (ftype == F_RDLCK || ftype == F_SHLCK)
1874 			type = CIFS_RDLCK;
1875 		else
1876 			type = CIFS_WRLCK;
1877 		lck = list_entry(el, struct lock_to_push, llist);
1878 		lck->pid = hash_lockowner(flock->c.flc_owner);
1879 		lck->netfid = cfile->fid.netfid;
1880 		lck->length = length;
1881 		lck->type = type;
1882 		lck->offset = flock->fl_start;
1883 	}
1884 	spin_unlock(&flctx->flc_lock);
1885 
1886 	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
1887 		int stored_rc;
1888 
1889 		stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
1890 					     lck->offset, lck->length, NULL,
1891 					     lck->type, 0);
1892 		if (stored_rc)
1893 			rc = stored_rc;
1894 		list_del(&lck->llist);
1895 		kfree(lck);
1896 	}
1897 
1898 out:
1899 	free_xid(xid);
1900 	return rc;
1901 err_out:
1902 	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
1903 		list_del(&lck->llist);
1904 		kfree(lck);
1905 	}
1906 	goto out;
1907 }
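
/*
 * The function above uses a count-then-allocate pattern; in outline
 * (sketch): the FL_POSIX locks are counted under flc_lock, the
 * lock_to_push entries are allocated with the spinlock dropped (since
 * GFP_KERNEL may sleep), and the list is walked a second time to fill
 * them in.
 *
 *	spin_lock(&flctx->flc_lock);
 *	list_for_each(el, &flctx->flc_posix)
 *		count++;
 *	spin_unlock(&flctx->flc_lock);
 *
 *	... allocate "count" entries with kmalloc(..., GFP_KERNEL) ...
 *
 *	spin_lock(&flctx->flc_lock);
 *	... fill the entries from flc_posix ...
 *	spin_unlock(&flctx->flc_lock);
 */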
1908 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1909 
1910 static int
1911 cifs_push_locks(struct cifsFileInfo *cfile)
1912 {
1913 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1914 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1915 	int rc = 0;
1916 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1917 	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
1918 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1919 
1920 	/* we are going to update can_cache_brlcks here - need write access */
1921 	cifs_down_write(&cinode->lock_sem);
1922 	if (!cinode->can_cache_brlcks) {
1923 		up_write(&cinode->lock_sem);
1924 		return rc;
1925 	}
1926 
1927 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1928 	if (cap_unix(tcon->ses) &&
1929 	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1930 	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
1931 		rc = cifs_push_posix_locks(cfile);
1932 	else
1933 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1934 		rc = tcon->ses->server->ops->push_mand_locks(cfile);
1935 
1936 	cinode->can_cache_brlcks = false;
1937 	up_write(&cinode->lock_sem);
1938 	return rc;
1939 }
1940 
1941 static void
1942 cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
1943 		bool *wait_flag, struct TCP_Server_Info *server)
1944 {
1945 	if (flock->c.flc_flags & FL_POSIX)
1946 		cifs_dbg(FYI, "Posix\n");
1947 	if (flock->c.flc_flags & FL_FLOCK)
1948 		cifs_dbg(FYI, "Flock\n");
1949 	if (flock->c.flc_flags & FL_SLEEP) {
1950 		cifs_dbg(FYI, "Blocking lock\n");
1951 		*wait_flag = true;
1952 	}
1953 	if (flock->c.flc_flags & FL_ACCESS)
1954 		cifs_dbg(FYI, "Process suspended by mandatory locking - not implemented yet\n");
1955 	if (flock->c.flc_flags & FL_LEASE)
1956 		cifs_dbg(FYI, "Lease on file - not implemented yet\n");
1957 	if (flock->c.flc_flags &
1958 	    (~(FL_POSIX | FL_FLOCK | FL_SLEEP |
1959 	       FL_ACCESS | FL_LEASE | FL_CLOSE | FL_OFDLCK)))
1960 		cifs_dbg(FYI, "Unknown lock flags 0x%x\n",
1961 		         flock->c.flc_flags);
1962 
1963 	*type = server->vals->large_lock_type;
1964 	if (lock_is_write(flock)) {
1965 		cifs_dbg(FYI, "F_WRLCK\n");
1966 		*type |= server->vals->exclusive_lock_type;
1967 		*lock = 1;
1968 	} else if (lock_is_unlock(flock)) {
1969 		cifs_dbg(FYI, "F_UNLCK\n");
1970 		*type |= server->vals->unlock_lock_type;
1971 		*unlock = 1;
1972 		/* Check if unlock includes more than one lock range */
1973 	} else if (lock_is_read(flock)) {
1974 		cifs_dbg(FYI, "F_RDLCK\n");
1975 		*type |= server->vals->shared_lock_type;
1976 		*lock = 1;
1977 	} else if (flock->c.flc_type == F_EXLCK) {
1978 		cifs_dbg(FYI, "F_EXLCK\n");
1979 		*type |= server->vals->exclusive_lock_type;
1980 		*lock = 1;
1981 	} else if (flock->c.flc_type == F_SHLCK) {
1982 		cifs_dbg(FYI, "F_SHLCK\n");
1983 		*type |= server->vals->shared_lock_type;
1984 		*lock = 1;
1985 	} else
1986 		cifs_dbg(FYI, "Unknown type of lock\n");
1987 }
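
/*
 * Example decode for the helper above (sketch): a blocking POSIX read
 * lock, i.e. flc_flags containing FL_POSIX | FL_SLEEP with flc_type
 * F_RDLCK, comes out as:
 *
 *	*type      = large_lock_type | shared_lock_type;
 *	*lock      = 1;
 *	*wait_flag = true;
 */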
1988 
1989 static int
1990 cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
1991 	   bool wait_flag, bool posix_lck, unsigned int xid)
1992 {
1993 	int rc = 0;
1994 	__u64 length = cifs_flock_len(flock);
1995 	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
1996 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1997 	struct TCP_Server_Info *server = tcon->ses->server;
1998 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1999 	__u16 netfid = cfile->fid.netfid;
2000 
2001 	if (posix_lck) {
2002 		int posix_lock_type;
2003 
2004 		rc = cifs_posix_lock_test(file, flock);
2005 		if (!rc)
2006 			return rc;
2007 
2008 		if (type & server->vals->shared_lock_type)
2009 			posix_lock_type = CIFS_RDLCK;
2010 		else
2011 			posix_lock_type = CIFS_WRLCK;
2012 		rc = CIFSSMBPosixLock(xid, tcon, netfid,
2013 				      hash_lockowner(flock->c.flc_owner),
2014 				      flock->fl_start, length, flock,
2015 				      posix_lock_type, wait_flag);
2016 		return rc;
2017 	}
2018 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
2019 
2020 	rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
2021 	if (!rc)
2022 		return rc;
2023 
2024 	/* BB we could chain these into one lock request BB */
2025 	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type,
2026 				    1, 0, false);
2027 	if (rc == 0) {
2028 		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
2029 					    type, 0, 1, false);
2030 		flock->c.flc_type = F_UNLCK;
2031 		if (rc != 0)
2032 			cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
2033 				 rc);
2034 		return 0;
2035 	}
2036 
2037 	if (type & server->vals->shared_lock_type) {
2038 		flock->c.flc_type = F_WRLCK;
2039 		return 0;
2040 	}
2041 
2042 	type &= ~server->vals->exclusive_lock_type;
2043 
2044 	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
2045 				    type | server->vals->shared_lock_type,
2046 				    1, 0, false);
2047 	if (rc == 0) {
2048 		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
2049 			type | server->vals->shared_lock_type, 0, 1, false);
2050 		flock->c.flc_type = F_RDLCK;
2051 		if (rc != 0)
2052 			cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
2053 				 rc);
2054 	} else
2055 		flock->c.flc_type = F_WRLCK;
2056 
2057 	return 0;
2058 }
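
/*
 * Note on cifs_getlk() above: with no dedicated "test lock" operation
 * on the wire, a GETLK request is emulated by taking the range
 * (mand_lock with lock == 1) and releasing it again on success
 * (unlock == 1). If the exclusive probe fails, a second, shared probe
 * distinguishes a read-locked range (reported as F_RDLCK) from a
 * write-locked one (reported as F_WRLCK).
 */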
2059 
2060 void
2061 cifs_move_llist(struct list_head *source, struct list_head *dest)
2062 {
2063 	struct list_head *li, *tmp;
2064 	list_for_each_safe(li, tmp, source)
2065 		list_move(li, dest);
2066 }
2067 
2068 void
2069 cifs_free_llist(struct list_head *llist)
2070 {
2071 	struct cifsLockInfo *li, *tmp;
2072 	list_for_each_entry_safe(li, tmp, llist, llist) {
2073 		cifs_del_lock_waiters(li);
2074 		list_del(&li->llist);
2075 		kfree(li);
2076 	}
2077 }
2078 
2079 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
2080 int
2081 cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
2082 		  unsigned int xid)
2083 {
2084 	int rc = 0, stored_rc;
2085 	static const int types[] = {
2086 		LOCKING_ANDX_LARGE_FILES,
2087 		LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
2088 	};
2089 	unsigned int i;
2090 	unsigned int max_num, num, max_buf;
2091 	LOCKING_ANDX_RANGE *buf, *cur;
2092 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2093 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
2094 	struct cifsLockInfo *li, *tmp;
2095 	__u64 length = cifs_flock_len(flock);
2096 	LIST_HEAD(tmp_llist);
2097 
2098 	/*
2099 	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
2100 	 * and check it before using.
2101 	 */
2102 	max_buf = tcon->ses->server->maxBuf;
2103 	if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE)))
2104 		return -EINVAL;
2105 
2106 	BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
2107 		     PAGE_SIZE);
2108 	max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
2109 			PAGE_SIZE);
2110 	max_num = (max_buf - sizeof(struct smb_hdr)) /
2111 						sizeof(LOCKING_ANDX_RANGE);
2112 	buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
2113 	if (!buf)
2114 		return -ENOMEM;
2115 
2116 	cifs_down_write(&cinode->lock_sem);
2117 	for (i = 0; i < 2; i++) {
2118 		cur = buf;
2119 		num = 0;
2120 		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
2121 			if (flock->fl_start > li->offset ||
2122 			    (flock->fl_start + length) <
2123 			    (li->offset + li->length))
2124 				continue;
2125 			if (current->tgid != li->pid)
2126 				continue;
2127 			if (types[i] != li->type)
2128 				continue;
2129 			if (cinode->can_cache_brlcks) {
2130 				/*
2131 				 * We can cache brlock requests - simply remove
2132 				 * the lock from the file's list.
2133 				 */
2134 				list_del(&li->llist);
2135 				cifs_del_lock_waiters(li);
2136 				kfree(li);
2137 				continue;
2138 			}
2139 			cur->Pid = cpu_to_le16(li->pid);
2140 			cur->LengthLow = cpu_to_le32((u32)li->length);
2141 			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
2142 			cur->OffsetLow = cpu_to_le32((u32)li->offset);
2143 			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
2144 			/*
2145 			 * We need to save the lock here so we can add it back
2146 			 * to the file's list if the unlock range request fails
2147 			 * on the server.
2148 			 */
2149 			list_move(&li->llist, &tmp_llist);
2150 			if (++num == max_num) {
2151 				stored_rc = cifs_lockv(xid, tcon,
2152 						       cfile->fid.netfid,
2153 						       li->type, num, 0, buf);
2154 				if (stored_rc) {
2155 					/*
2156 					 * We failed on the unlock range
2157 					 * request - add all locks from the tmp
2158 					 * list to the head of the file's list.
2159 					 */
2160 					cifs_move_llist(&tmp_llist,
2161 							&cfile->llist->locks);
2162 					rc = stored_rc;
2163 				} else
2164 					/*
2165 					 * The unlock range request succeeded -
2166 					 * free the tmp list.
2167 					 */
2168 					cifs_free_llist(&tmp_llist);
2169 				cur = buf;
2170 				num = 0;
2171 			} else
2172 				cur++;
2173 		}
2174 		if (num) {
2175 			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
2176 					       types[i], num, 0, buf);
2177 			if (stored_rc) {
2178 				cifs_move_llist(&tmp_llist,
2179 						&cfile->llist->locks);
2180 				rc = stored_rc;
2181 			} else
2182 				cifs_free_llist(&tmp_llist);
2183 		}
2184 	}
2185 
2186 	up_write(&cinode->lock_sem);
2187 	kfree(buf);
2188 	return rc;
2189 }
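
/*
 * Rollback pattern used by the unlock path above (sketch): each
 * matching lock is parked on tmp_llist before the request goes out, so
 * a server failure can restore the file's list unchanged.
 *
 *	list_move(&li->llist, &tmp_llist);
 *	stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid, type, num, 0, buf);
 *	if (stored_rc)
 *		cifs_move_llist(&tmp_llist, &cfile->llist->locks);
 *	else
 *		cifs_free_llist(&tmp_llist);
 */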
2190 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
2191 
2192 static int
2193 cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
2194 	   bool wait_flag, bool posix_lck, int lock, int unlock,
2195 	   unsigned int xid)
2196 {
2197 	int rc = 0;
2198 	__u64 length = cifs_flock_len(flock);
2199 	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
2200 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2201 	struct TCP_Server_Info *server = tcon->ses->server;
2202 	struct inode *inode = d_inode(cfile->dentry);
2203 
2204 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
2205 	if (posix_lck) {
2206 		int posix_lock_type;
2207 
2208 		rc = cifs_posix_lock_set(file, flock);
2209 		if (rc <= FILE_LOCK_DEFERRED)
2210 			return rc;
2211 
2212 		if (type & server->vals->shared_lock_type)
2213 			posix_lock_type = CIFS_RDLCK;
2214 		else
2215 			posix_lock_type = CIFS_WRLCK;
2216 
2217 		if (unlock == 1)
2218 			posix_lock_type = CIFS_UNLCK;
2219 
2220 		rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
2221 				      hash_lockowner(flock->c.flc_owner),
2222 				      flock->fl_start, length,
2223 				      NULL, posix_lock_type, wait_flag);
2224 		goto out;
2225 	}
2226 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
2227 	if (lock) {
2228 		struct cifsLockInfo *lock;
2229 
2230 		lock = cifs_lock_init(flock->fl_start, length, type,
2231 				      flock->c.flc_flags);
2232 		if (!lock)
2233 			return -ENOMEM;
2234 
2235 		rc = cifs_lock_add_if(cfile, lock, wait_flag);
2236 		if (rc < 0) {
2237 			kfree(lock);
2238 			return rc;
2239 		}
2240 		if (!rc)
2241 			goto out;
2242 
2243 		/*
2244 		 * A Windows 7 server can delay breaking a lease from read to
2245 		 * None if we set a byte-range lock on a file - break it
2246 		 * explicitly before sending the lock to the server to be sure
2247 		 * the next read won't conflict with non-overlapping locks due
2248 		 * to page-granular reads.
2249 		 */
2250 		if (!CIFS_CACHE_WRITE(CIFS_I(inode)) &&
2251 					CIFS_CACHE_READ(CIFS_I(inode))) {
2252 			cifs_zap_mapping(inode);
2253 			cifs_dbg(FYI, "Set no oplock for inode=%p due to mand locks\n",
2254 				 inode);
2255 			CIFS_I(inode)->oplock = 0;
2256 		}
2257 
2258 		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
2259 					    type, 1, 0, wait_flag);
2260 		if (rc) {
2261 			kfree(lock);
2262 			return rc;
2263 		}
2264 
2265 		cifs_lock_add(cfile, lock);
2266 	} else if (unlock)
2267 		rc = server->ops->mand_unlock_range(cfile, flock, xid);
2268 
2269 out:
2270 	if ((flock->c.flc_flags & FL_POSIX) || (flock->c.flc_flags & FL_FLOCK)) {
2271 		/*
2272 		 * If this is a request to remove all locks because we
2273 		 * are closing the file, it doesn't matter if the
2274 		 * unlocking failed as both cifs.ko and the SMB server
2275 		 * remove the lock on file close
2276 		 */
2277 		if (rc) {
2278 			cifs_dbg(VFS, "%s failed rc=%d\n", __func__, rc);
2279 			if (!(flock->c.flc_flags & FL_CLOSE))
2280 				return rc;
2281 		}
2282 		rc = locks_lock_file_wait(file, flock);
2283 	}
2284 	return rc;
2285 }
2286 
2287 int cifs_flock(struct file *file, int cmd, struct file_lock *fl)
2288 {
2289 	int rc, xid;
2290 	int lock = 0, unlock = 0;
2291 	bool wait_flag = false;
2292 	bool posix_lck = false;
2293 	struct cifs_sb_info *cifs_sb;
2294 	struct cifs_tcon *tcon;
2295 	struct cifsFileInfo *cfile;
2296 	__u32 type;
2297 
2298 	xid = get_xid();
2299 
2300 	if (!(fl->c.flc_flags & FL_FLOCK)) {
2301 		rc = -ENOLCK;
2302 		free_xid(xid);
2303 		return rc;
2304 	}
2305 
2306 	cfile = (struct cifsFileInfo *)file->private_data;
2307 	tcon = tlink_tcon(cfile->tlink);
2308 
2309 	cifs_read_flock(fl, &type, &lock, &unlock, &wait_flag,
2310 			tcon->ses->server);
2311 	cifs_sb = CIFS_FILE_SB(file);
2312 
2313 	if (cap_unix(tcon->ses) &&
2314 	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
2315 	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
2316 		posix_lck = true;
2317 
2318 	if (!lock && !unlock) {
2319 		/*
2320 		 * if this is neither a lock nor an unlock request then there
2321 		 * is nothing to do since we do not know what it is
2322 		 */
2323 		rc = -EOPNOTSUPP;
2324 		free_xid(xid);
2325 		return rc;
2326 	}
2327 
2328 	rc = cifs_setlk(file, fl, type, wait_flag, posix_lck, lock, unlock,
2329 			xid);
2330 	free_xid(xid);
2331 	return rc;
2334 }
2335 
2336 int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
2337 {
2338 	int rc, xid;
2339 	int lock = 0, unlock = 0;
2340 	bool wait_flag = false;
2341 	bool posix_lck = false;
2342 	struct cifs_sb_info *cifs_sb;
2343 	struct cifs_tcon *tcon;
2344 	struct cifsFileInfo *cfile;
2345 	__u32 type;
2346 
2347 	rc = -EACCES;
2348 	xid = get_xid();
2349 
2350 	cifs_dbg(FYI, "%s: %pD2 cmd=0x%x flags=0x%x type=0x%x r=%lld:%lld\n", __func__, file, cmd,
2351 		 flock->c.flc_flags, flock->c.flc_type,
2352 		 (long long)flock->fl_start,
2353 		 (long long)flock->fl_end);
2354 
2355 	cfile = (struct cifsFileInfo *)file->private_data;
2356 	tcon = tlink_tcon(cfile->tlink);
2357 
2358 	cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
2359 			tcon->ses->server);
2360 	cifs_sb = CIFS_FILE_SB(file);
2361 	set_bit(CIFS_INO_CLOSE_ON_LOCK, &CIFS_I(d_inode(cfile->dentry))->flags);
2362 
2363 	if (cap_unix(tcon->ses) &&
2364 	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
2365 	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
2366 		posix_lck = true;
2367 	/*
2368 	 * BB add code here to normalize offset and length to account for
2369 	 * negative length, which we cannot accept over the wire.
2370 	 */
2371 	if (IS_GETLK(cmd)) {
2372 		rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
2373 		free_xid(xid);
2374 		return rc;
2375 	}
2376 
2377 	if (!lock && !unlock) {
2378 		/*
2379 		 * if this is neither a lock nor an unlock request then there
2380 		 * is nothing to do since we do not know what it is
2381 		 */
2382 		free_xid(xid);
2383 		return -EOPNOTSUPP;
2384 	}
2385 
2386 	rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
2387 			xid);
2388 	free_xid(xid);
2389 	return rc;
2390 }
2391 
2392 void cifs_write_subrequest_terminated(struct cifs_io_subrequest *wdata, ssize_t result,
2393 				      bool was_async)
2394 {
2395 	struct netfs_io_request *wreq = wdata->rreq;
2396 	struct netfs_inode *ictx = netfs_inode(wreq->inode);
2397 	loff_t wrend;
2398 
2399 	if (result > 0) {
2400 		wrend = wdata->subreq.start + wdata->subreq.transferred + result;
2401 
2402 		if (wrend > ictx->zero_point &&
2403 		    (wdata->rreq->origin == NETFS_UNBUFFERED_WRITE ||
2404 		     wdata->rreq->origin == NETFS_DIO_WRITE))
2405 			ictx->zero_point = wrend;
2406 		if (wrend > ictx->remote_i_size)
2407 			netfs_resize_file(ictx, wrend, true);
2408 	}
2409 
2410 	netfs_write_subrequest_terminated(&wdata->subreq, result, was_async);
2411 }
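
/*
 * Worked example for the bookkeeping above: a subrequest that started
 * at offset 4096, had already transferred 512 bytes and just wrote
 * another 1024 (result) ends at wrend = 4096 + 512 + 1024 = 5632;
 * remote_i_size is pulled forward to that point, as is zero_point for
 * unbuffered and direct writes.
 */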
2412 
2413 struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
2414 					bool fsuid_only)
2415 {
2416 	struct cifsFileInfo *open_file = NULL;
2417 	struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->netfs.inode.i_sb);
2418 
2419 	/* only filter by fsuid on multiuser mounts */
2420 	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
2421 		fsuid_only = false;
2422 
2423 	spin_lock(&cifs_inode->open_file_lock);
2424 	/* we could simply get the first_list_entry since write-only entries
2425 	   are always at the end of the list but since the first entry might
2426 	   have a close pending, we go through the whole list */
2427 	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
2428 		if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
2429 			continue;
2430 		if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
2431 			if (!open_file->invalidHandle) {
2432 				/* found a good file */
2433 				/* lock it so it will not be closed on us */
2434 				cifsFileInfo_get(open_file);
2435 				spin_unlock(&cifs_inode->open_file_lock);
2436 				return open_file;
2437 			} /* else might as well continue, and look for
2438 			     another, or simply have the caller reopen it
2439 			     again rather than trying to fix this handle */
2440 		} else /* write only file */
2441 			break; /* write only files are last so must be done */
2442 	}
2443 	spin_unlock(&cifs_inode->open_file_lock);
2444 	return NULL;
2445 }
2446 
2447 /* Return -EBADF if no handle is found and general rc otherwise */
2448 int
2449 cifs_get_writable_file(struct cifsInodeInfo *cifs_inode, int flags,
2450 		       struct cifsFileInfo **ret_file)
2451 {
2452 	struct cifsFileInfo *open_file, *inv_file = NULL;
2453 	struct cifs_sb_info *cifs_sb;
2454 	bool any_available = false;
2455 	int rc = -EBADF;
2456 	unsigned int refind = 0;
2457 	bool fsuid_only = flags & FIND_WR_FSUID_ONLY;
2458 	bool with_delete = flags & FIND_WR_WITH_DELETE;
2459 	*ret_file = NULL;
2460 
2461 	/*
2462 	 * Having a null inode here (because mapping->host was set to zero by
2463 	 * the VFS or MM) should not happen but we had reports of an oops (due
2464 	 * to it being zero) during stress testcases so we need to check for it
2465 	 */
2466 
2467 	if (cifs_inode == NULL) {
2468 		cifs_dbg(VFS, "Null inode passed to cifs_get_writable_file\n");
2469 		dump_stack();
2470 		return rc;
2471 	}
2472 
2473 	cifs_sb = CIFS_SB(cifs_inode->netfs.inode.i_sb);
2474 
2475 	/* only filter by fsuid on multiuser mounts */
2476 	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
2477 		fsuid_only = false;
2478 
2479 	spin_lock(&cifs_inode->open_file_lock);
2480 refind_writable:
2481 	if (refind > MAX_REOPEN_ATT) {
2482 		spin_unlock(&cifs_inode->open_file_lock);
2483 		return rc;
2484 	}
2485 	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
2486 		if (!any_available && open_file->pid != current->tgid)
2487 			continue;
2488 		if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
2489 			continue;
2490 		if (with_delete && !(open_file->fid.access & DELETE))
2491 			continue;
2492 		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
2493 			if (!open_file->invalidHandle) {
2494 				/* found a good writable file */
2495 				cifsFileInfo_get(open_file);
2496 				spin_unlock(&cifs_inode->open_file_lock);
2497 				*ret_file = open_file;
2498 				return 0;
2499 			} else {
2500 				if (!inv_file)
2501 					inv_file = open_file;
2502 			}
2503 		}
2504 	}
2505 	/* couldn't find usable FH with same pid, try any available */
2506 	if (!any_available) {
2507 		any_available = true;
2508 		goto refind_writable;
2509 	}
2510 
2511 	if (inv_file) {
2512 		any_available = false;
2513 		cifsFileInfo_get(inv_file);
2514 	}
2515 
2516 	spin_unlock(&cifs_inode->open_file_lock);
2517 
2518 	if (inv_file) {
2519 		rc = cifs_reopen_file(inv_file, false);
2520 		if (!rc) {
2521 			*ret_file = inv_file;
2522 			return 0;
2523 		}
2524 
2525 		spin_lock(&cifs_inode->open_file_lock);
2526 		list_move_tail(&inv_file->flist, &cifs_inode->openFileList);
2527 		spin_unlock(&cifs_inode->open_file_lock);
2528 		cifsFileInfo_put(inv_file);
2529 		++refind;
2530 		inv_file = NULL;
2531 		spin_lock(&cifs_inode->open_file_lock);
2532 		goto refind_writable;
2533 	}
2534 
2535 	return rc;
2536 }
2537 
2538 struct cifsFileInfo *
2539 find_writable_file(struct cifsInodeInfo *cifs_inode, int flags)
2540 {
2541 	struct cifsFileInfo *cfile;
2542 	int rc;
2543 
2544 	rc = cifs_get_writable_file(cifs_inode, flags, &cfile);
2545 	if (rc)
2546 		cifs_dbg(FYI, "Couldn't find writable handle rc=%d\n", rc);
2547 
2548 	return cfile;
2549 }
2550 
2551 int
2552 cifs_get_writable_path(struct cifs_tcon *tcon, const char *name,
2553 		       int flags,
2554 		       struct cifsFileInfo **ret_file)
2555 {
2556 	struct cifsFileInfo *cfile;
2557 	void *page = alloc_dentry_path();
2558 
2559 	*ret_file = NULL;
2560 
2561 	spin_lock(&tcon->open_file_lock);
2562 	list_for_each_entry(cfile, &tcon->openFileList, tlist) {
2563 		struct cifsInodeInfo *cinode;
2564 		const char *full_path = build_path_from_dentry(cfile->dentry, page);
2565 		if (IS_ERR(full_path)) {
2566 			spin_unlock(&tcon->open_file_lock);
2567 			free_dentry_path(page);
2568 			return PTR_ERR(full_path);
2569 		}
2570 		if (strcmp(full_path, name))
2571 			continue;
2572 
2573 		cinode = CIFS_I(d_inode(cfile->dentry));
2574 		spin_unlock(&tcon->open_file_lock);
2575 		free_dentry_path(page);
2576 		return cifs_get_writable_file(cinode, flags, ret_file);
2577 	}
2578 
2579 	spin_unlock(&tcon->open_file_lock);
2580 	free_dentry_path(page);
2581 	return -ENOENT;
2582 }
2583 
2584 int
2585 cifs_get_readable_path(struct cifs_tcon *tcon, const char *name,
2586 		       struct cifsFileInfo **ret_file)
2587 {
2588 	struct cifsFileInfo *cfile;
2589 	void *page = alloc_dentry_path();
2590 
2591 	*ret_file = NULL;
2592 
2593 	spin_lock(&tcon->open_file_lock);
2594 	list_for_each_entry(cfile, &tcon->openFileList, tlist) {
2595 		struct cifsInodeInfo *cinode;
2596 		const char *full_path = build_path_from_dentry(cfile->dentry, page);
2597 		if (IS_ERR(full_path)) {
2598 			spin_unlock(&tcon->open_file_lock);
2599 			free_dentry_path(page);
2600 			return PTR_ERR(full_path);
2601 		}
2602 		if (strcmp(full_path, name))
2603 			continue;
2604 
2605 		cinode = CIFS_I(d_inode(cfile->dentry));
2606 		spin_unlock(&tcon->open_file_lock);
2607 		free_dentry_path(page);
2608 		*ret_file = find_readable_file(cinode, 0);
2609 		return *ret_file ? 0 : -ENOENT;
2610 	}
2611 
2612 	spin_unlock(&tcon->open_file_lock);
2613 	free_dentry_path(page);
2614 	return -ENOENT;
2615 }
2616 
2617 /*
2618  * Flush data on a strict file.
2619  */
2620 int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
2621 		      int datasync)
2622 {
2623 	unsigned int xid;
2624 	int rc = 0;
2625 	struct cifs_tcon *tcon;
2626 	struct TCP_Server_Info *server;
2627 	struct cifsFileInfo *smbfile = file->private_data;
2628 	struct inode *inode = file_inode(file);
2629 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2630 
2631 	rc = file_write_and_wait_range(file, start, end);
2632 	if (rc) {
2633 		trace_cifs_fsync_err(inode->i_ino, rc);
2634 		return rc;
2635 	}
2636 
2637 	xid = get_xid();
2638 
2639 	cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
2640 		 file, datasync);
2641 
2642 	if (!CIFS_CACHE_READ(CIFS_I(inode))) {
2643 		rc = cifs_zap_mapping(inode);
2644 		if (rc) {
2645 			cifs_dbg(FYI, "rc: %d during invalidate phase\n", rc);
2646 			rc = 0; /* don't care about it in fsync */
2647 		}
2648 	}
2649 
2650 	tcon = tlink_tcon(smbfile->tlink);
2651 	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2652 		server = tcon->ses->server;
2653 		if (server->ops->flush == NULL) {
2654 			rc = -ENOSYS;
2655 			goto strict_fsync_exit;
2656 		}
2657 
2658 		if ((OPEN_FMODE(smbfile->f_flags) & FMODE_WRITE) == 0) {
2659 			smbfile = find_writable_file(CIFS_I(inode), FIND_WR_ANY);
2660 			if (smbfile) {
2661 				rc = server->ops->flush(xid, tcon, &smbfile->fid);
2662 				cifsFileInfo_put(smbfile);
2663 			} else
2664 				cifs_dbg(FYI, "ignore fsync for file not open for write\n");
2665 		} else
2666 			rc = server->ops->flush(xid, tcon, &smbfile->fid);
2667 	}
2668 
2669 strict_fsync_exit:
2670 	free_xid(xid);
2671 	return rc;
2672 }
2673 
2674 /*
2675  * Flush data on a non-strict file.
2676  */
2677 int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
2678 {
2679 	unsigned int xid;
2680 	int rc = 0;
2681 	struct cifs_tcon *tcon;
2682 	struct TCP_Server_Info *server;
2683 	struct cifsFileInfo *smbfile = file->private_data;
2684 	struct inode *inode = file_inode(file);
2685 	struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
2686 
2687 	rc = file_write_and_wait_range(file, start, end);
2688 	if (rc) {
2689 		trace_cifs_fsync_err(file_inode(file)->i_ino, rc);
2690 		return rc;
2691 	}
2692 
2693 	xid = get_xid();
2694 
2695 	cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
2696 		 file, datasync);
2697 
2698 	tcon = tlink_tcon(smbfile->tlink);
2699 	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2700 		server = tcon->ses->server;
2701 		if (server->ops->flush == NULL) {
2702 			rc = -ENOSYS;
2703 			goto fsync_exit;
2704 		}
2705 
2706 		if ((OPEN_FMODE(smbfile->f_flags) & FMODE_WRITE) == 0) {
2707 			smbfile = find_writable_file(CIFS_I(inode), FIND_WR_ANY);
2708 			if (smbfile) {
2709 				rc = server->ops->flush(xid, tcon, &smbfile->fid);
2710 				cifsFileInfo_put(smbfile);
2711 			} else
2712 				cifs_dbg(FYI, "ignore fsync for file not open for write\n");
2713 		} else
2714 			rc = server->ops->flush(xid, tcon, &smbfile->fid);
2715 	}
2716 
2717 fsync_exit:
2718 	free_xid(xid);
2719 	return rc;
2720 }
2721 
2722 /*
2723  * As file closes, flush all cached write data for this inode checking
2724  * for write behind errors.
2725  */
2726 int cifs_flush(struct file *file, fl_owner_t id)
2727 {
2728 	struct inode *inode = file_inode(file);
2729 	int rc = 0;
2730 
2731 	if (file->f_mode & FMODE_WRITE)
2732 		rc = filemap_write_and_wait(inode->i_mapping);
2733 
2734 	cifs_dbg(FYI, "Flush inode %p file %p rc %d\n", inode, file, rc);
2735 	if (rc) {
2736 		/* get more nuanced writeback errors */
2737 		rc = filemap_check_wb_err(file->f_mapping, 0);
2738 		trace_cifs_flush_err(inode->i_ino, rc);
2739 	}
2740 	return rc;
2741 }
2742 
2743 static ssize_t
2744 cifs_writev(struct kiocb *iocb, struct iov_iter *from)
2745 {
2746 	struct file *file = iocb->ki_filp;
2747 	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
2748 	struct inode *inode = file->f_mapping->host;
2749 	struct cifsInodeInfo *cinode = CIFS_I(inode);
2750 	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
2751 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2752 	ssize_t rc;
2753 
2754 	rc = netfs_start_io_write(inode);
2755 	if (rc < 0)
2756 		return rc;
2757 
2758 	/*
2759 	 * We need to hold the sem to be sure nobody modifies the lock list
2760 	 * with a brlock that prevents writing.
2761 	 */
2762 	down_read(&cinode->lock_sem);
2763 
2764 	rc = generic_write_checks(iocb, from);
2765 	if (rc <= 0)
2766 		goto out;
2767 
2768 	if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) &&
2769 	    (cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(from),
2770 				     server->vals->exclusive_lock_type, 0,
2771 				     NULL, CIFS_WRITE_OP))) {
2772 		rc = -EACCES;
2773 		goto out;
2774 	}
2775 
2776 	rc = netfs_buffered_write_iter_locked(iocb, from, NULL);
2777 
2778 out:
2779 	up_read(&cinode->lock_sem);
2780 	netfs_end_io_write(inode);
2781 	if (rc > 0)
2782 		rc = generic_write_sync(iocb, rc);
2783 	return rc;
2784 }
2785 
2786 ssize_t
2787 cifs_strict_writev(struct kiocb *iocb, struct iov_iter *from)
2788 {
2789 	struct inode *inode = file_inode(iocb->ki_filp);
2790 	struct cifsInodeInfo *cinode = CIFS_I(inode);
2791 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2792 	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
2793 						iocb->ki_filp->private_data;
2794 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2795 	ssize_t written;
2796 
2797 	written = cifs_get_writer(cinode);
2798 	if (written)
2799 		return written;
2800 
2801 	if (CIFS_CACHE_WRITE(cinode)) {
2802 		if (cap_unix(tcon->ses) &&
2803 		    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
2804 		    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) {
2805 			written = netfs_file_write_iter(iocb, from);
2806 			goto out;
2807 		}
2808 		written = cifs_writev(iocb, from);
2809 		goto out;
2810 	}
2811 	/*
2812 	 * For non-oplocked files in strict cache mode we need to write the data
2813 	 * to the server exactly from pos to pos+len-1 rather than flush all
2814 	 * affected pages because it may cause an error with mandatory locks on
2815 	 * these pages but not on the region from pos to pos+len-1.
2816 	 */
2817 	written = netfs_file_write_iter(iocb, from);
2818 	if (CIFS_CACHE_READ(cinode)) {
2819 		/*
2820 		 * We have read level caching and we have just sent a write
2821 		 * request to the server thus making data in the cache stale.
2822 		 * Zap the cache and set oplock/lease level to NONE to avoid
2823 		 * reading stale data from the cache. All subsequent read
2824 		 * operations will read new data from the server.
2825 		 */
2826 		cifs_zap_mapping(inode);
2827 		cifs_dbg(FYI, "Set Oplock/Lease to NONE for inode=%p after write\n",
2828 			 inode);
2829 		cinode->oplock = 0;
2830 	}
2831 out:
2832 	cifs_put_writer(cinode);
2833 	return written;
2834 }
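
/*
 * Summary of the strict write paths above (sketch): with a
 * write-caching oplock and POSIX brlock capability the write goes
 * straight through netfs_file_write_iter(); with the oplock but
 * without POSIX brlocks it goes through cifs_writev(), which checks
 * for conflicting brlocks first; with no write-caching oplock the data
 * is written through and any read cache is zapped.
 */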
2835 
2836 ssize_t cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter)
2837 {
2838 	ssize_t rc;
2839 	struct inode *inode = file_inode(iocb->ki_filp);
2840 
2841 	if (iocb->ki_flags & IOCB_DIRECT)
2842 		return netfs_unbuffered_read_iter(iocb, iter);
2843 
2844 	rc = cifs_revalidate_mapping(inode);
2845 	if (rc)
2846 		return rc;
2847 
2848 	return netfs_file_read_iter(iocb, iter);
2849 }
2850 
2851 ssize_t cifs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
2852 {
2853 	struct inode *inode = file_inode(iocb->ki_filp);
2854 	struct cifsInodeInfo *cinode = CIFS_I(inode);
2855 	ssize_t written;
2856 	int rc;
2857 
2858 	if (iocb->ki_filp->f_flags & O_DIRECT) {
2859 		written = netfs_unbuffered_write_iter(iocb, from);
2860 		if (written > 0 && CIFS_CACHE_READ(cinode)) {
2861 			cifs_zap_mapping(inode);
2862 			cifs_dbg(FYI,
2863 				 "Set no oplock for inode=%p after a write operation\n",
2864 				 inode);
2865 			cinode->oplock = 0;
2866 		}
2867 		return written;
2868 	}
2869 
2870 	written = cifs_get_writer(cinode);
2871 	if (written)
2872 		return written;
2873 
2874 	written = netfs_file_write_iter(iocb, from);
2875 
2876 	if (!CIFS_CACHE_WRITE(CIFS_I(inode))) {
2877 		rc = filemap_fdatawrite(inode->i_mapping);
2878 		if (rc)
2879 			cifs_dbg(FYI, "cifs_file_write_iter: %d rc on %p inode\n",
2880 				 rc, inode);
2881 	}
2882 
2883 	cifs_put_writer(cinode);
2884 	return written;
2885 }
2886 
2887 ssize_t
2888 cifs_strict_readv(struct kiocb *iocb, struct iov_iter *to)
2889 {
2890 	struct inode *inode = file_inode(iocb->ki_filp);
2891 	struct cifsInodeInfo *cinode = CIFS_I(inode);
2892 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2893 	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
2894 						iocb->ki_filp->private_data;
2895 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2896 	int rc = -EACCES;
2897 
2898 	/*
2899 	 * In strict cache mode we need to read from the server all the time
2900 	 * if we don't have a level II oplock because the server can delay the
2901 	 * mtime change - so we can't make a decision about invalidating the
2902 	 * inode. Page reads can also fail if there are mandatory locks on
2903 	 * pages affected by this read but not on the region from pos to
2904 	 * pos+len-1.
2905 	 */
2906 	if (!CIFS_CACHE_READ(cinode))
2907 		return netfs_unbuffered_read_iter(iocb, to);
2908 
2909 	if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0) {
2910 		if (iocb->ki_flags & IOCB_DIRECT)
2911 			return netfs_unbuffered_read_iter(iocb, to);
2912 		return netfs_buffered_read_iter(iocb, to);
2913 	}
2914 
2915 	/*
2916 	 * We need to hold the sem to be sure nobody modifies the lock list
2917 	 * with a brlock that prevents reading.
2918 	 */
2919 	if (iocb->ki_flags & IOCB_DIRECT) {
2920 		rc = netfs_start_io_direct(inode);
2921 		if (rc < 0)
2922 			goto out;
2923 		rc = -EACCES;
2924 		down_read(&cinode->lock_sem);
2925 		if (!cifs_find_lock_conflict(
2926 			    cfile, iocb->ki_pos, iov_iter_count(to),
2927 			    tcon->ses->server->vals->shared_lock_type,
2928 			    0, NULL, CIFS_READ_OP))
2929 			rc = netfs_unbuffered_read_iter_locked(iocb, to);
2930 		up_read(&cinode->lock_sem);
2931 		netfs_end_io_direct(inode);
2932 	} else {
2933 		rc = netfs_start_io_read(inode);
2934 		if (rc < 0)
2935 			goto out;
2936 		rc = -EACCES;
2937 		down_read(&cinode->lock_sem);
2938 		if (!cifs_find_lock_conflict(
2939 			    cfile, iocb->ki_pos, iov_iter_count(to),
2940 			    tcon->ses->server->vals->shared_lock_type,
2941 			    0, NULL, CIFS_READ_OP))
2942 			rc = filemap_read(iocb, to, 0);
2943 		up_read(&cinode->lock_sem);
2944 		netfs_end_io_read(inode);
2945 	}
2946 out:
2947 	return rc;
2948 }
2949 
2950 static vm_fault_t cifs_page_mkwrite(struct vm_fault *vmf)
2951 {
2952 	return netfs_page_mkwrite(vmf, NULL);
2953 }
2954 
2955 static const struct vm_operations_struct cifs_file_vm_ops = {
2956 	.fault = filemap_fault,
2957 	.map_pages = filemap_map_pages,
2958 	.page_mkwrite = cifs_page_mkwrite,
2959 };
2960 
2961 int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
2962 {
2963 	int xid, rc = 0;
2964 	struct inode *inode = file_inode(file);
2965 
2966 	xid = get_xid();
2967 
2968 	if (!CIFS_CACHE_READ(CIFS_I(inode)))
2969 		rc = cifs_zap_mapping(inode);
2970 	if (!rc)
2971 		rc = generic_file_mmap(file, vma);
2972 	if (!rc)
2973 		vma->vm_ops = &cifs_file_vm_ops;
2974 
2975 	free_xid(xid);
2976 	return rc;
2977 }
2978 
2979 int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
2980 {
2981 	int rc, xid;
2982 
2983 	xid = get_xid();
2984 
2985 	rc = cifs_revalidate_file(file);
2986 	if (rc)
2987 		cifs_dbg(FYI, "Validation prior to mmap failed, error=%d\n",
2988 			 rc);
2989 	if (!rc)
2990 		rc = generic_file_mmap(file, vma);
2991 	if (!rc)
2992 		vma->vm_ops = &cifs_file_vm_ops;
2993 
2994 	free_xid(xid);
2995 	return rc;
2996 }
2997 
2998 static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
2999 {
3000 	struct cifsFileInfo *open_file;
3001 
3002 	spin_lock(&cifs_inode->open_file_lock);
3003 	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
3004 		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
3005 			spin_unlock(&cifs_inode->open_file_lock);
3006 			return 1;
3007 		}
3008 	}
3009 	spin_unlock(&cifs_inode->open_file_lock);
3010 	return 0;
3011 }
3012 
3013 /* We do not want to update the file size from the server for inodes
3014    open for write - to avoid races with writepage extending the file.
3015    In the future we could consider allowing refreshing of the inode
3016    only on increases in the file size, but this is tricky to do
3017    without racing with writebehind page caching in the current
3018    Linux kernel design. */
3019 bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file,
3020 			    bool from_readdir)
3021 {
3022 	if (!cifsInode)
3023 		return true;
3024 
3025 	if (is_inode_writable(cifsInode) ||
3026 		((cifsInode->oplock & CIFS_CACHE_RW_FLG) != 0 && from_readdir)) {
3027 		/* This inode is open for write at least once */
3028 		struct cifs_sb_info *cifs_sb;
3029 
3030 		cifs_sb = CIFS_SB(cifsInode->netfs.inode.i_sb);
3031 		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
3032 			/* since there is no page cache to corrupt on
3033 			   directio we can change the size safely */
3034 			return true;
3035 		}
3036 
3037 		if (i_size_read(&cifsInode->netfs.inode) < end_of_file)
3038 			return true;
3039 
3040 		return false;
3041 	} else
3042 		return true;
3043 }
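
/*
 * Example for the helper above: on a non-directio mount with the file
 * open for write, a cached i_size of 8192 and a server-reported
 * end_of_file of 4096 returns false - shrinking could discard dirty
 * pagecache the server has not seen yet - while a reported end_of_file
 * of 16384 returns true, since only growth is observed.
 */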
3044 
3045 void cifs_oplock_break(struct work_struct *work)
3046 {
3047 	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
3048 						  oplock_break);
3049 	struct inode *inode = d_inode(cfile->dentry);
3050 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
3051 	struct cifsInodeInfo *cinode = CIFS_I(inode);
3052 	struct cifs_tcon *tcon;
3053 	struct TCP_Server_Info *server;
3054 	struct tcon_link *tlink;
3055 	int rc = 0;
3056 	bool purge_cache = false, oplock_break_cancelled;
3057 	__u64 persistent_fid, volatile_fid;
3058 	__u16 net_fid;
3059 
3060 	wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
3061 			TASK_UNINTERRUPTIBLE);
3062 
3063 	tlink = cifs_sb_tlink(cifs_sb);
3064 	if (IS_ERR(tlink))
3065 		goto out;
3066 	tcon = tlink_tcon(tlink);
3067 	server = tcon->ses->server;
3068 
3069 	server->ops->downgrade_oplock(server, cinode, cfile->oplock_level,
3070 				      cfile->oplock_epoch, &purge_cache);
3071 
3072 	if (!CIFS_CACHE_WRITE(cinode) && CIFS_CACHE_READ(cinode) &&
3073 						cifs_has_mand_locks(cinode)) {
3074 		cifs_dbg(FYI, "Reset oplock to None for inode=%p due to mand locks\n",
3075 			 inode);
3076 		cinode->oplock = 0;
3077 	}
3078 
3079 	if (inode && S_ISREG(inode->i_mode)) {
3080 		if (CIFS_CACHE_READ(cinode))
3081 			break_lease(inode, O_RDONLY);
3082 		else
3083 			break_lease(inode, O_WRONLY);
3084 		rc = filemap_fdatawrite(inode->i_mapping);
3085 		if (!CIFS_CACHE_READ(cinode) || purge_cache) {
3086 			rc = filemap_fdatawait(inode->i_mapping);
3087 			mapping_set_error(inode->i_mapping, rc);
3088 			cifs_zap_mapping(inode);
3089 		}
3090 		cifs_dbg(FYI, "Oplock flush inode %p rc %d\n", inode, rc);
3091 		if (CIFS_CACHE_WRITE(cinode))
3092 			goto oplock_break_ack;
3093 	}
3094 
3095 	rc = cifs_push_locks(cfile);
3096 	if (rc)
3097 		cifs_dbg(VFS, "Push locks rc = %d\n", rc);
3098 
3099 oplock_break_ack:
3100 	/*
3101 	 * When an oplock break is received and there are no active file
3102 	 * handles, only cached ones, schedule the deferred close immediately
3103 	 * so that a new open will not use a cached handle.
3104 	 */
3105 
3106 	if (!CIFS_CACHE_HANDLE(cinode) && !list_empty(&cinode->deferred_closes))
3107 		cifs_close_deferred_file(cinode);
3108 
3109 	persistent_fid = cfile->fid.persistent_fid;
3110 	volatile_fid = cfile->fid.volatile_fid;
3111 	net_fid = cfile->fid.netfid;
3112 	oplock_break_cancelled = cfile->oplock_break_cancelled;
3113 
3114 	_cifsFileInfo_put(cfile, false /* do not wait for ourself */, false);
3115 	/*
3116 	 * MS-SMB2 3.2.5.19.1 and 3.2.5.19.2 (and MS-CIFS 3.2.5.42) do not require
3117 	 * an acknowledgment to be sent when the file has already been closed.
3118 	 */
3119 	spin_lock(&cinode->open_file_lock);
3120 	/* check that the list is not empty, since this can race with kill_sb calling tree disconnect */
3121 	if (!oplock_break_cancelled && !list_empty(&cinode->openFileList)) {
3122 		spin_unlock(&cinode->open_file_lock);
3123 		rc = server->ops->oplock_response(tcon, persistent_fid,
3124 						  volatile_fid, net_fid, cinode);
3125 		cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
3126 	} else
3127 		spin_unlock(&cinode->open_file_lock);
3128 
3129 	cifs_put_tlink(tlink);
3130 out:
3131 	cifs_done_oplock_break(cinode);
3132 }
3133 
3134 static int cifs_swap_activate(struct swap_info_struct *sis,
3135 			      struct file *swap_file, sector_t *span)
3136 {
3137 	struct cifsFileInfo *cfile = swap_file->private_data;
3138 	struct inode *inode = swap_file->f_mapping->host;
3139 	unsigned long blocks;
3140 	long long isize;
3141 
3142 	cifs_dbg(FYI, "swap activate\n");
3143 
3144 	if (!swap_file->f_mapping->a_ops->swap_rw)
3145 		/* Cannot support swap */
3146 		return -EINVAL;
3147 
3148 	spin_lock(&inode->i_lock);
3149 	blocks = inode->i_blocks;
3150 	isize = inode->i_size;
3151 	spin_unlock(&inode->i_lock);
3152 	if (blocks*512 < isize) {
3153 		pr_warn("swap activate: swapfile has holes\n");
3154 		return -EINVAL;
3155 	}
3156 	*span = sis->pages;
3157 
3158 	pr_warn_once("Swap support over SMB3 is experimental\n");
3159 
3160 	/*
3161 	 * TODO: consider adding ACL (or documenting how) to prevent other
3162 	 * users (on this or other systems) from reading it
3163 	 */
3164 
3165 
3166 	/* TODO: add sk_set_memalloc(inet) or similar */
3167 
3168 	if (cfile)
3169 		cfile->swapfile = true;
3170 	/*
3171 	 * TODO: Since file already open, we can't open with DENY_ALL here
3172 	 * but we could add call to grab a byte range lock to prevent others
3173 	 * from reading or writing the file
3174 	 */
3175 
3176 	sis->flags |= SWP_FS_OPS;
3177 	return add_swap_extent(sis, 0, sis->max, 0);
3178 }
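
/*
 * Worked example for the holes check above: a 1 MiB swapfile
 * (isize = 1048576) needs at least 1048576 / 512 = 2048 blocks. If
 * i_blocks were only 1536, blocks * 512 = 786432 < 1048576, the file
 * is sparse, and activation fails with -EINVAL.
 */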
3179 
3180 static void cifs_swap_deactivate(struct file *file)
3181 {
3182 	struct cifsFileInfo *cfile = file->private_data;
3183 
3184 	cifs_dbg(FYI, "swap deactivate\n");
3185 
3186 	/* TODO: undo sk_set_memalloc(inet) will eventually be needed */
3187 
3188 	if (cfile)
3189 		cfile->swapfile = false;
3190 
3191 	/* do we need to unpin (or unlock) the file */
3192 }
3193 
3194 /**
3195  * cifs_swap_rw - SMB3 address space operation for swap I/O
3196  * @iocb: target I/O control block
3197  * @iter: I/O buffer
3198  *
3199  * Perform IO to the swap-file.  This is much like direct IO.
3200  */
3201 static int cifs_swap_rw(struct kiocb *iocb, struct iov_iter *iter)
3202 {
3203 	ssize_t ret;
3204 
3205 	if (iov_iter_rw(iter) == READ)
3206 		ret = netfs_unbuffered_read_iter_locked(iocb, iter);
3207 	else
3208 		ret = netfs_unbuffered_write_iter_locked(iocb, iter, NULL);
3209 	if (ret < 0)
3210 		return ret;
3211 	return 0;
3212 }
3213 
3214 const struct address_space_operations cifs_addr_ops = {
3215 	.read_folio	= netfs_read_folio,
3216 	.readahead	= netfs_readahead,
3217 	.writepages	= netfs_writepages,
3218 	.dirty_folio	= netfs_dirty_folio,
3219 	.release_folio	= netfs_release_folio,
3220 	.direct_IO	= noop_direct_IO,
3221 	.invalidate_folio = netfs_invalidate_folio,
3222 	.migrate_folio	= filemap_migrate_folio,
3223 	/*
3224 	 * TODO: investigate and if useful we could add an is_dirty_writeback
3225 	 * helper if needed
3226 	 */
3227 	.swap_activate	= cifs_swap_activate,
3228 	.swap_deactivate = cifs_swap_deactivate,
3229 	.swap_rw = cifs_swap_rw,
3230 };
3231 
3232 /*
3233  * cifs_readahead requires the server to support a buffer large enough to
3234  * contain the header plus one complete page of data.  Otherwise, we need
3235  * to leave cifs_readahead out of the address space operations.
3236  */
3237 const struct address_space_operations cifs_addr_ops_smallbuf = {
3238 	.read_folio	= netfs_read_folio,
3239 	.writepages	= netfs_writepages,
3240 	.dirty_folio	= netfs_dirty_folio,
3241 	.release_folio	= netfs_release_folio,
3242 	.invalidate_folio = netfs_invalidate_folio,
3243 	.migrate_folio	= filemap_migrate_folio,
3244 };
3245