xref: /linux/fs/smb/client/file.c (revision 334fbe734e687404f346eba7d5d96ed2b44d35ab)
1 // SPDX-License-Identifier: LGPL-2.1
2 /*
3  *
4  *   vfs operations that deal with files
5  *
6  *   Copyright (C) International Business Machines  Corp., 2002,2010
7  *   Author(s): Steve French (sfrench@us.ibm.com)
8  *              Jeremy Allison (jra@samba.org)
9  *
10  */
11 #include <linux/fs.h>
12 #include <linux/fs_struct.h>
13 #include <linux/filelock.h>
14 #include <linux/backing-dev.h>
15 #include <linux/stat.h>
16 #include <linux/fcntl.h>
17 #include <linux/pagemap.h>
18 #include <linux/writeback.h>
19 #include <linux/task_io_accounting_ops.h>
20 #include <linux/delay.h>
21 #include <linux/mount.h>
22 #include <linux/slab.h>
23 #include <linux/swap.h>
24 #include <linux/mm.h>
25 #include <asm/div64.h>
26 #include "cifsfs.h"
27 #include "cifsglob.h"
28 #include "cifsproto.h"
29 #include "smb2proto.h"
30 #include "cifs_unicode.h"
31 #include "cifs_debug.h"
32 #include "cifs_fs_sb.h"
33 #include "fscache.h"
34 #include "smbdirect.h"
35 #include "fs_context.h"
36 #include "cifs_ioctl.h"
37 #include "cached_dir.h"
38 #include <trace/events/netfs.h>
39 
40 static int cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush);
41 
/*
 * Prepare a subrequest to upload to the server.  We need to allocate credits
 * so that we know the maximum amount of data that we can include in it.
 */
static void cifs_prepare_write(struct netfs_io_subrequest *subreq)
{
	struct cifs_io_subrequest *wdata =
		container_of(subreq, struct cifs_io_subrequest, subreq);
	struct cifs_io_request *req = wdata->req;
	struct netfs_io_stream *stream = &req->rreq.io_streams[subreq->stream_nr];
	struct TCP_Server_Info *server;
	struct cifsFileInfo *open_file = req->cfile;
	struct cifs_sb_info *cifs_sb = CIFS_SB(wdata->rreq->inode->i_sb);
	size_t wsize = req->rreq.wsize;
	int rc;

	/* Take an xid once per subrequest; it is released in cifs_free_subrequest() */
	if (!wdata->have_xid) {
		wdata->xid = get_xid();
		wdata->have_xid = true;
	}

	/* Pick a transport channel for this I/O (multichannel-aware) */
	server = cifs_pick_channel(tlink_tcon(open_file->tlink)->ses);
	wdata->server = server;

	/* Lazily negotiate the write size if the mount didn't fix one */
	if (cifs_sb->ctx->wsize == 0)
		cifs_negotiate_wsize(server, cifs_sb->ctx,
				     tlink_tcon(req->cfile->tlink));

retry:
	/* Reopen a handle invalidated by reconnect before asking for credits */
	if (open_file->invalidHandle) {
		rc = cifs_reopen_file(open_file, false);
		if (rc < 0) {
			if (rc == -EAGAIN)
				goto retry;
			subreq->error = rc;
			return netfs_prepare_write_failed(subreq);
		}
	}

	/*
	 * Block until credits are granted; sreq_max_len is capped to the
	 * amount of data those credits will cover.
	 */
	rc = server->ops->wait_mtu_credits(server, wsize, &stream->sreq_max_len,
					   &wdata->credits);
	if (rc < 0) {
		subreq->error = rc;
		return netfs_prepare_write_failed(subreq);
	}

	wdata->credits.rreq_debug_id = subreq->rreq->debug_id;
	wdata->credits.rreq_debug_index = subreq->debug_index;
	wdata->credits.in_flight_check = 1;
	trace_smb3_rw_credits(wdata->rreq->debug_id,
			      wdata->subreq.debug_index,
			      wdata->credits.value,
			      server->credits, server->in_flight,
			      wdata->credits.value,
			      cifs_trace_rw_credits_write_prepare);

#ifdef CONFIG_CIFS_SMB_DIRECT
	/* smbdirect (RDMA): limit segment count to what one FRMR can describe */
	if (server->smbd_conn) {
		const struct smbdirect_socket_parameters *sp =
			smbd_get_parameters(server->smbd_conn);

		stream->sreq_max_segs = sp->max_frmr_depth;
	}
#endif
}
107 
/*
 * Issue a subrequest to upload to the server.
 */
static void cifs_issue_write(struct netfs_io_subrequest *subreq)
{
	struct cifs_io_subrequest *wdata =
		container_of(subreq, struct cifs_io_subrequest, subreq);
	struct cifs_sb_info *sbi = CIFS_SB(subreq->rreq->inode->i_sb);
	int rc;

	/* Refuse new writes once the superblock has been forcibly shut down */
	if (cifs_forced_shutdown(sbi)) {
		rc = smb_EIO(smb_eio_trace_forced_shutdown);
		goto fail;
	}

	/* Trim/grow the credit grant to match the final subrequest size */
	rc = adjust_credits(wdata->server, wdata, cifs_trace_rw_credits_issue_write_adjust);
	if (rc)
		goto fail;

	/* Handle went stale since prepare_write; fail with -EAGAIN so netfs retries */
	rc = -EAGAIN;
	if (wdata->req->cfile->invalidHandle)
		goto fail;

	wdata->server->ops->async_writev(wdata);
out:
	return;

fail:
	if (rc == -EAGAIN)
		trace_netfs_sreq(subreq, netfs_sreq_trace_retry);
	else
		trace_netfs_sreq(subreq, netfs_sreq_trace_fail);
	/* Return any unused credits, then complete the subrequest with rc */
	add_credits_and_wake_if(wdata->server, &wdata->credits, 0);
	cifs_write_subrequest_terminated(wdata, rc);
	goto out;
}
144 
/* netfs hook: invalidate the local cache for the inode being written back */
static void cifs_netfs_invalidate_cache(struct netfs_io_request *wreq)
{
	cifs_invalidate_cache(wreq->inode, 0);
}
149 
/*
 * Negotiate the size of a read operation on behalf of the netfs library.
 */
static int cifs_prepare_read(struct netfs_io_subrequest *subreq)
{
	struct netfs_io_request *rreq = subreq->rreq;
	struct cifs_io_subrequest *rdata = container_of(subreq, struct cifs_io_subrequest, subreq);
	struct cifs_io_request *req = container_of(subreq->rreq, struct cifs_io_request, rreq);
	struct TCP_Server_Info *server;
	struct cifs_sb_info *cifs_sb = CIFS_SB(rreq->inode->i_sb);
	size_t size;
	int rc = 0;

	/* Take an xid once per subrequest; it is released in cifs_free_subrequest() */
	if (!rdata->have_xid) {
		rdata->xid = get_xid();
		rdata->have_xid = true;
	}

	/* Pick a transport channel for this I/O (multichannel-aware) */
	server = cifs_pick_channel(tlink_tcon(req->cfile->tlink)->ses);
	rdata->server = server;

	/* Lazily negotiate the read size if the mount didn't fix one */
	if (cifs_sb->ctx->rsize == 0)
		cifs_negotiate_rsize(server, cifs_sb->ctx,
				     tlink_tcon(req->cfile->tlink));

	/* Block until credits are granted; 'size' is what they will cover */
	rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->rsize,
					   &size, &rdata->credits);
	if (rc)
		return rc;

	/* Cap the subrequest length to what the granted credits allow */
	rreq->io_streams[0].sreq_max_len = size;

	rdata->credits.in_flight_check = 1;
	rdata->credits.rreq_debug_id = rreq->debug_id;
	rdata->credits.rreq_debug_index = subreq->debug_index;

	trace_smb3_rw_credits(rdata->rreq->debug_id,
			      rdata->subreq.debug_index,
			      rdata->credits.value,
			      server->credits, server->in_flight, 0,
			      cifs_trace_rw_credits_read_submit);

#ifdef CONFIG_CIFS_SMB_DIRECT
	/* smbdirect (RDMA): limit segment count to what one FRMR can describe */
	if (server->smbd_conn) {
		const struct smbdirect_socket_parameters *sp =
			smbd_get_parameters(server->smbd_conn);

		rreq->io_streams[0].sreq_max_segs = sp->max_frmr_depth;
	}
#endif
	return 0;
}
202 
/*
 * Issue a read operation on behalf of the netfs helper functions.  We're asked
 * to make a read of a certain size at a point in the file.  We are permitted
 * to only read a portion of that, but as long as we read something, the netfs
 * helper will call us again so that we can issue another read.
 */
static void cifs_issue_read(struct netfs_io_subrequest *subreq)
{
	struct netfs_io_request *rreq = subreq->rreq;
	struct cifs_io_subrequest *rdata = container_of(subreq, struct cifs_io_subrequest, subreq);
	struct cifs_io_request *req = container_of(subreq->rreq, struct cifs_io_request, rreq);
	struct TCP_Server_Info *server = rdata->server;
	int rc = 0;

	cifs_dbg(FYI, "%s: op=%08x[%x] mapping=%p len=%zu/%zu\n",
		 __func__, rreq->debug_id, subreq->debug_index, rreq->mapping,
		 subreq->transferred, subreq->len);

	/* Trim/grow the credit grant to match the final subrequest size */
	rc = adjust_credits(server, rdata, cifs_trace_rw_credits_issue_read_adjust);
	if (rc)
		goto failed;

	/* Handle went stale (reconnect); keep reopening until it sticks */
	if (req->cfile->invalidHandle) {
		do {
			rc = cifs_reopen_file(req->cfile, true);
		} while (rc == -EAGAIN);
		if (rc)
			goto failed;
	}

	/* For buffered reads, let netfs zero-fill any short-read tail */
	if (subreq->rreq->origin != NETFS_UNBUFFERED_READ &&
	    subreq->rreq->origin != NETFS_DIO_READ)
		__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);

	trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
	rc = rdata->server->ops->async_readv(rdata);
	if (rc)
		goto failed;
	return;

failed:
	subreq->error = rc;
	netfs_read_subreq_terminated(subreq);
}
247 
/*
 * Writeback calls this when it finds a folio that needs uploading.  This isn't
 * called if writeback only has copy-to-cache to deal with.
 */
static void cifs_begin_writeback(struct netfs_io_request *wreq)
{
	struct cifs_io_request *req = container_of(wreq, struct cifs_io_request, rreq);
	int ret;

	/* Writeback has no struct file; borrow any writable handle on the inode */
	ret = cifs_get_writable_file(CIFS_I(wreq->inode), FIND_ANY, &req->cfile);
	if (ret) {
		/* No handle: leave stream 0 unavailable so no upload is attempted */
		cifs_dbg(VFS, "No writable handle in writepages ret=%d\n", ret);
		return;
	}

	wreq->io_streams[0].avail = true;
}
265 
/*
 * Initialise a request.
 */
static int cifs_init_request(struct netfs_io_request *rreq, struct file *file)
{
	struct cifs_io_request *req = container_of(rreq, struct cifs_io_request, rreq);
	struct cifs_sb_info *cifs_sb = CIFS_SB(rreq->inode);
	struct cifsFileInfo *open_file = NULL;

	rreq->rsize = cifs_sb->ctx->rsize;
	rreq->wsize = cifs_sb->ctx->wsize;
	req->pid = current->tgid; // Ummm...  This may be a workqueue

	if (file) {
		open_file = file->private_data;
		rreq->netfs_priv = file->private_data;
		/* Pin the open handle for the lifetime of the request */
		req->cfile = cifsFileInfo_get(open_file);
		if (cifs_sb_flags(cifs_sb) & CIFS_MOUNT_RWPIDFORWARD)
			req->pid = req->cfile->pid;
	} else if (rreq->origin != NETFS_WRITEBACK) {
		/* Only writeback may arrive without a file (cfile found later) */
		WARN_ON_ONCE(1);
		return smb_EIO1(smb_eio_trace_not_netfs_writeback, rreq->origin);
	}

	return 0;
}
292 
293 /*
294  * Completion of a request operation.
295  */
cifs_rreq_done(struct netfs_io_request * rreq)296 static void cifs_rreq_done(struct netfs_io_request *rreq)
297 {
298 	struct timespec64 atime, mtime;
299 	struct inode *inode = rreq->inode;
300 
301 	/* we do not want atime to be less than mtime, it broke some apps */
302 	atime = inode_set_atime_to_ts(inode, current_time(inode));
303 	mtime = inode_get_mtime(inode);
304 	if (timespec64_compare(&atime, &mtime))
305 		inode_set_atime_to_ts(inode, inode_get_mtime(inode));
306 }
307 
/* Release per-request state: drop the pinned open-file reference, if any */
static void cifs_free_request(struct netfs_io_request *rreq)
{
	struct cifs_io_request *req = container_of(rreq, struct cifs_io_request, rreq);

	if (req->cfile)
		cifsFileInfo_put(req->cfile);
}
315 
/*
 * Release per-subrequest state: deregister any RDMA MR, return unspent
 * credits to the server and free the xid taken in prepare_read/write.
 */
static void cifs_free_subrequest(struct netfs_io_subrequest *subreq)
{
	struct cifs_io_subrequest *rdata =
		container_of(subreq, struct cifs_io_subrequest, subreq);
	int rc = subreq->error;

	if (rdata->subreq.source == NETFS_DOWNLOAD_FROM_SERVER) {
#ifdef CONFIG_CIFS_SMB_DIRECT
		if (rdata->mr) {
			smbd_deregister_mr(rdata->mr);
			rdata->mr = NULL;
		}
#endif
	}

	/* Return any credits still held by this subrequest */
	if (rdata->credits.value != 0) {
		trace_smb3_rw_credits(rdata->rreq->debug_id,
				      rdata->subreq.debug_index,
				      rdata->credits.value,
				      rdata->server ? rdata->server->credits : 0,
				      rdata->server ? rdata->server->in_flight : 0,
				      -rdata->credits.value,
				      cifs_trace_rw_credits_free_subreq);
		if (rdata->server)
			add_credits_and_wake_if(rdata->server, &rdata->credits, 0);
		else
			/* No server to return them to; just drop the count */
			rdata->credits.value = 0;
	}

	if (rdata->have_xid)
		free_xid(rdata->xid);
}
348 
/* Operations table hooking cifs into the generic netfs I/O library */
const struct netfs_request_ops cifs_req_ops = {
	.request_pool		= &cifs_io_request_pool,
	.subrequest_pool	= &cifs_io_subrequest_pool,
	.init_request		= cifs_init_request,
	.free_request		= cifs_free_request,
	.free_subrequest	= cifs_free_subrequest,
	.prepare_read		= cifs_prepare_read,
	.issue_read		= cifs_issue_read,
	.done			= cifs_rreq_done,
	.begin_writeback	= cifs_begin_writeback,
	.prepare_write		= cifs_prepare_write,
	.issue_write		= cifs_issue_write,
	.invalidate_cache	= cifs_netfs_invalidate_cache,
};
363 
364 /*
365  * Mark as invalid, all open files on tree connections since they
366  * were closed when session to server was lost.
367  */
368 void
cifs_mark_open_files_invalid(struct cifs_tcon * tcon)369 cifs_mark_open_files_invalid(struct cifs_tcon *tcon)
370 {
371 	struct cifsFileInfo *open_file = NULL;
372 	struct list_head *tmp;
373 	struct list_head *tmp1;
374 
375 	/* only send once per connect */
376 	spin_lock(&tcon->tc_lock);
377 	if (tcon->need_reconnect)
378 		tcon->status = TID_NEED_RECON;
379 
380 	if (tcon->status != TID_NEED_RECON) {
381 		spin_unlock(&tcon->tc_lock);
382 		return;
383 	}
384 	tcon->status = TID_IN_FILES_INVALIDATE;
385 	spin_unlock(&tcon->tc_lock);
386 
387 	/* list all files open on tree connection and mark them invalid */
388 	spin_lock(&tcon->open_file_lock);
389 	list_for_each_safe(tmp, tmp1, &tcon->openFileList) {
390 		open_file = list_entry(tmp, struct cifsFileInfo, tlist);
391 		open_file->invalidHandle = true;
392 		open_file->oplock_break_cancelled = true;
393 	}
394 	spin_unlock(&tcon->open_file_lock);
395 
396 	invalidate_all_cached_dirs(tcon);
397 	spin_lock(&tcon->tc_lock);
398 	if (tcon->status == TID_IN_FILES_INVALIDATE)
399 		tcon->status = TID_NEED_TCON;
400 	spin_unlock(&tcon->tc_lock);
401 
402 	/*
403 	 * BB Add call to evict_inodes(sb) for all superblocks mounted
404 	 * to this tcon.
405 	 */
406 }
407 
cifs_convert_flags(unsigned int oflags,int rdwr_for_fscache)408 static inline int cifs_convert_flags(unsigned int oflags, int rdwr_for_fscache)
409 {
410 	int flags = 0;
411 
412 	if (oflags & O_TMPFILE)
413 		flags |= DELETE;
414 
415 	if ((oflags & O_ACCMODE) == O_RDONLY)
416 		return flags | GENERIC_READ;
417 	if ((oflags & O_ACCMODE) == O_WRONLY) {
418 		return flags | (rdwr_for_fscache == 1 ?
419 				(GENERIC_READ | GENERIC_WRITE) : GENERIC_WRITE);
420 	}
421 	if ((oflags & O_ACCMODE) == O_RDWR) {
422 		/* GENERIC_ALL is too much permission to request
423 		   can cause unnecessary access denied on create */
424 		/* return GENERIC_ALL; */
425 		return flags | GENERIC_READ | GENERIC_WRITE;
426 	}
427 
428 	return flags | READ_CONTROL | FILE_WRITE_ATTRIBUTES |
429 		FILE_READ_ATTRIBUTES | FILE_WRITE_EA | FILE_APPEND_DATA |
430 		FILE_WRITE_DATA | FILE_READ_DATA;
431 }
432 
433 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
cifs_posix_convert_flags(unsigned int flags)434 static u32 cifs_posix_convert_flags(unsigned int flags)
435 {
436 	u32 posix_flags = 0;
437 
438 	if ((flags & O_ACCMODE) == O_RDONLY)
439 		posix_flags = SMB_O_RDONLY;
440 	else if ((flags & O_ACCMODE) == O_WRONLY)
441 		posix_flags = SMB_O_WRONLY;
442 	else if ((flags & O_ACCMODE) == O_RDWR)
443 		posix_flags = SMB_O_RDWR;
444 
445 	if (flags & O_CREAT) {
446 		posix_flags |= SMB_O_CREAT;
447 		if (flags & O_EXCL)
448 			posix_flags |= SMB_O_EXCL;
449 	} else if (flags & O_EXCL)
450 		cifs_dbg(FYI, "Application %s pid %d has incorrectly set O_EXCL flag but not O_CREAT on file open. Ignoring O_EXCL\n",
451 			 current->comm, current->tgid);
452 
453 	if (flags & O_TRUNC)
454 		posix_flags |= SMB_O_TRUNC;
455 	/* be safe and imply O_SYNC for O_DSYNC */
456 	if (flags & O_DSYNC)
457 		posix_flags |= SMB_O_SYNC;
458 	if (flags & O_DIRECTORY)
459 		posix_flags |= SMB_O_DIRECTORY;
460 	if (flags & O_NOFOLLOW)
461 		posix_flags |= SMB_O_NOFOLLOW;
462 	if (flags & O_DIRECT)
463 		posix_flags |= SMB_O_DIRECT;
464 
465 	return posix_flags;
466 }
467 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
468 
cifs_get_disposition(unsigned int flags)469 static inline int cifs_get_disposition(unsigned int flags)
470 {
471 	if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
472 		return FILE_CREATE;
473 	else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
474 		return FILE_OVERWRITE_IF;
475 	else if ((flags & O_CREAT) == O_CREAT)
476 		return FILE_OPEN_IF;
477 	else if ((flags & O_TRUNC) == O_TRUNC)
478 		return FILE_OVERWRITE;
479 	else
480 		return FILE_OPEN;
481 }
482 
483 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
/*
 * Open a file using the legacy SMB1 POSIX extensions.
 *
 * On success *pinode (if supplied) is filled in with a new or revalidated
 * inode built from the FILE_UNIX_BASIC_INFO the server returned.  If the
 * server reports Type == -1 the open succeeded but no metadata came back,
 * and the caller is expected to do a qpathinfo itself.
 */
int cifs_posix_open(const char *full_path, struct inode **pinode,
			struct super_block *sb, int mode, unsigned int f_flags,
			__u32 *poplock, __u16 *pnetfid, unsigned int xid)
{
	int rc;
	FILE_UNIX_BASIC_INFO *presp_data;
	__u32 posix_flags = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fattr fattr;
	struct tcon_link *tlink;
	struct cifs_tcon *tcon;

	cifs_dbg(FYI, "posix open %s\n", full_path);

	presp_data = kzalloc_obj(FILE_UNIX_BASIC_INFO);
	if (presp_data == NULL)
		return -ENOMEM;

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		rc = PTR_ERR(tlink);
		goto posix_open_ret;
	}

	tcon = tlink_tcon(tlink);
	/* Honour the process umask for the create mode */
	mode &= ~current_umask();

	posix_flags = cifs_posix_convert_flags(f_flags);
	rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
			     poplock, full_path, cifs_sb->local_nls,
			     cifs_remap(cifs_sb));
	cifs_put_tlink(tlink);

	if (rc)
		goto posix_open_ret;

	if (presp_data->Type == cpu_to_le32(-1))
		goto posix_open_ret; /* open ok, caller does qpathinfo */

	if (!pinode)
		goto posix_open_ret; /* caller does not need info */

	cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);

	/* get new inode and set it up */
	if (*pinode == NULL) {
		cifs_fill_uniqueid(sb, &fattr);
		*pinode = cifs_iget(sb, &fattr);
		if (!*pinode) {
			rc = -ENOMEM;
			goto posix_open_ret;
		}
	} else {
		/* Existing inode: drop stale pagecache and refresh attributes */
		cifs_revalidate_mapping(*pinode);
		rc = cifs_fattr_to_inode(*pinode, &fattr, false);
	}

posix_open_ret:
	kfree(presp_data);
	return rc;
}
545 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
546 
/*
 * Open a file with an NT-style (non-POSIX) create request and refresh the
 * inode metadata from the result.
 *
 * If fscache is active and the caller asked for write-only access, we first
 * try read+write so the cache can be populated around partial writes
 * (rdwr_for_fscache == 1); on -EACCES we retry once with the original access
 * (rdwr_for_fscache == 2) and invalidate the cache instead.
 */
static int cifs_nt_open(const char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
			struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
			struct cifs_fid *fid, unsigned int xid, struct cifs_open_info_data *buf)
{
	int rc;
	int desired_access;
	int disposition;
	int create_options = CREATE_NOT_DIR;
	struct TCP_Server_Info *server = tcon->ses->server;
	struct cifs_open_parms oparms;
	int rdwr_for_fscache = 0;

	if (!server->ops->open)
		return -ENOSYS;

	/* If we're caching, we need to be able to fill in around partial writes. */
	if (cifs_fscache_enabled(inode) && (f_flags & O_ACCMODE) == O_WRONLY)
		rdwr_for_fscache = 1;

	desired_access = cifs_convert_flags(f_flags, rdwr_for_fscache);

/*********************************************************************
 *  open flag mapping table:
 *
 *	POSIX Flag            CIFS Disposition
 *	----------            ----------------
 *	O_CREAT               FILE_OPEN_IF
 *	O_CREAT | O_EXCL      FILE_CREATE
 *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
 *	O_TRUNC               FILE_OVERWRITE
 *	none of the above     FILE_OPEN
 *
 *	Note that there is not a direct match between disposition
 *	FILE_SUPERSEDE (ie create whether or not file exists although
 *	O_CREAT | O_TRUNC is similar but truncates the existing
 *	file rather than creating a new file as FILE_SUPERSEDE does
 *	(which uses the attributes / metadata passed in on open call)
 *?
 *?  O_SYNC is a reasonable match to CIFS writethrough flag
 *?  and the read write flags match reasonably.  O_LARGEFILE
 *?  is irrelevant because largefile support is always used
 *?  by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
 *	 O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
 *********************************************************************/

	disposition = cifs_get_disposition(f_flags);
	/* BB pass O_SYNC flag through on file attributes .. BB */
	create_options |= cifs_open_create_options(f_flags, create_options);

retry_open:
	oparms = (struct cifs_open_parms) {
		.tcon = tcon,
		.cifs_sb = cifs_sb,
		.desired_access = desired_access,
		.create_options = cifs_create_options(cifs_sb, create_options),
		.disposition = disposition,
		.path = full_path,
		.fid = fid,
	};

	rc = server->ops->open(xid, &oparms, oplock, buf);
	if (rc) {
		/* The fscache read+write upgrade was refused: retry as requested */
		if (rc == -EACCES && rdwr_for_fscache == 1) {
			desired_access = cifs_convert_flags(f_flags, 0);
			rdwr_for_fscache = 2;
			goto retry_open;
		}
		return rc;
	}
	/* Opened write-only after all; the cache can't be kept coherent */
	if (rdwr_for_fscache == 2)
		cifs_invalidate_cache(inode, FSCACHE_INVAL_DIO_WRITE);

	/* TODO: Add support for calling posix query info but with passing in fid */
	if (tcon->unix_ext)
		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
					      xid);
	else
		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
					 xid, fid);

	if (rc) {
		/* Metadata refresh failed: close the handle we just opened */
		server->ops->close(xid, tcon, fid);
		if (rc == -ESTALE)
			rc = -EOPENSTALE;
	}

	return rc;
}
635 
636 static bool
cifs_has_mand_locks(struct cifsInodeInfo * cinode)637 cifs_has_mand_locks(struct cifsInodeInfo *cinode)
638 {
639 	struct cifs_fid_locks *cur;
640 	bool has_locks = false;
641 
642 	down_read(&cinode->lock_sem);
643 	list_for_each_entry(cur, &cinode->llist, llist) {
644 		if (!list_empty(&cur->locks)) {
645 			has_locks = true;
646 			break;
647 		}
648 	}
649 	up_read(&cinode->lock_sem);
650 	return has_locks;
651 }
652 
/* Take @sem for write, polling with short sleeps rather than blocking. */
void
cifs_down_write(struct rw_semaphore *sem)
{
	for (;;) {
		if (down_write_trylock(sem))
			return;
		msleep(10);
	}
}
659 
660 static void cifsFileInfo_put_work(struct work_struct *work);
661 void serverclose_work(struct work_struct *work);
662 
/*
 * Allocate and initialise a cifsFileInfo for a freshly opened handle, link it
 * onto the tcon's and inode's open-file lists and stash it in
 * file->private_data.  Returns NULL on allocation failure.
 */
struct cifsFileInfo *cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
				       struct tcon_link *tlink, __u32 oplock,
				       const char *symlink_target)
{
	struct dentry *dentry = file_dentry(file);
	struct inode *inode = d_inode(dentry);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifsFileInfo *cfile;
	struct cifs_fid_locks *fdlocks;
	struct cifs_tcon *tcon = tlink_tcon(tlink);
	struct TCP_Server_Info *server = tcon->ses->server;

	cfile = kzalloc_obj(struct cifsFileInfo);
	if (cfile == NULL)
		return cfile;

	fdlocks = kzalloc_obj(struct cifs_fid_locks);
	if (!fdlocks) {
		kfree(cfile);
		return NULL;
	}

	if (symlink_target) {
		cfile->symlink_target = kstrdup(symlink_target, GFP_KERNEL);
		if (!cfile->symlink_target) {
			kfree(fdlocks);
			kfree(cfile);
			return NULL;
		}
	}

	/* Tie the per-handle lock list to this cfile */
	INIT_LIST_HEAD(&fdlocks->locks);
	fdlocks->cfile = cfile;
	cfile->llist = fdlocks;

	cfile->count = 1;
	cfile->pid = current->tgid;
	cfile->uid = current_fsuid();
	cfile->dentry = dget(dentry);
	cfile->f_flags = file->f_flags;
	cfile->invalidHandle = false;
	cfile->deferred_close_scheduled = false;
	cfile->status_file_deleted = file->f_flags & O_TMPFILE;
	cfile->tlink = cifs_get_tlink(tlink);
	INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
	INIT_WORK(&cfile->put, cifsFileInfo_put_work);
	INIT_WORK(&cfile->serverclose, serverclose_work);
	INIT_DELAYED_WORK(&cfile->deferred, smb2_deferred_work_close);
	mutex_init(&cfile->fh_mutex);
	spin_lock_init(&cfile->file_info_lock);

	/*
	 * If the server returned a read oplock and we have mandatory brlocks,
	 * set oplock level to None.
	 */
	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
		oplock = 0;
	}

	cifs_down_write(&cinode->lock_sem);
	list_add(&fdlocks->llist, &cinode->llist);
	up_write(&cinode->lock_sem);

	spin_lock(&tcon->open_file_lock);
	/* A lease break may have arrived while the open was pending */
	if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE && oplock)
		oplock = fid->pending_open->oplock;
	list_del(&fid->pending_open->olist);

	list_add(&cfile->tlist, &tcon->openFileList);
	atomic_inc(&tcon->num_local_opens);

	/* if readable file instance put first in list*/
	spin_lock(&cinode->open_file_lock);
	if (file->f_flags & O_TMPFILE)
		set_bit(CIFS_INO_TMPFILE, &cinode->flags);
	/* set_fid() may flip purge_cache back on; checked after unlock below */
	fid->purge_cache = false;
	server->ops->set_fid(cfile, fid, oplock);

	if (file->f_mode & FMODE_READ)
		list_add(&cfile->flist, &cinode->openFileList);
	else
		list_add_tail(&cfile->flist, &cinode->openFileList);
	spin_unlock(&cinode->open_file_lock);
	spin_unlock(&tcon->open_file_lock);

	if (fid->purge_cache)
		cifs_zap_mapping(inode);

	file->private_data = cfile;
	return cfile;
}
755 
/* Take an additional reference on an open-file info and return it. */
struct cifsFileInfo *
cifsFileInfo_get(struct cifsFileInfo *cifs_file)
{
	spin_lock(&cifs_file->file_info_lock);
	cifsFileInfo_get_locked(cifs_file);
	spin_unlock(&cifs_file->file_info_lock);
	return cifs_file;
}
764 
/*
 * Final teardown of a cifsFileInfo once its refcount has reached zero:
 * discard its byte-range lock records and free all associated state.
 */
static void cifsFileInfo_put_final(struct cifsFileInfo *cifs_file)
{
	struct inode *inode = d_inode(cifs_file->dentry);
	struct cifsInodeInfo *cifsi = CIFS_I(inode);
	struct cifsLockInfo *li, *tmp;

	/*
	 * Delete any outstanding lock records. We'll lose them when the file
	 * is closed anyway.
	 */
	cifs_down_write(&cifsi->lock_sem);
	list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
		list_del(&li->llist);
		cifs_del_lock_waiters(li);
		kfree(li);
	}
	list_del(&cifs_file->llist->llist);
	kfree(cifs_file->llist);
	up_write(&cifsi->lock_sem);

	/* Drop the references taken in cifs_new_fileinfo() */
	cifs_put_tlink(cifs_file->tlink);
	dput(cifs_file->dentry);
	kfree(cifs_file->symlink_target);
	kfree(cifs_file);
}
790 
/* Workqueue wrapper so the final put can run outside the caller's context */
static void cifsFileInfo_put_work(struct work_struct *work)
{
	struct cifsFileInfo *cifs_file = container_of(work,
			struct cifsFileInfo, put);

	cifsFileInfo_put_final(cifs_file);
}
798 
serverclose_work(struct work_struct * work)799 void serverclose_work(struct work_struct *work)
800 {
801 	struct cifsFileInfo *cifs_file = container_of(work,
802 			struct cifsFileInfo, serverclose);
803 
804 	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
805 
806 	struct TCP_Server_Info *server = tcon->ses->server;
807 	int rc = 0;
808 	int retries = 0;
809 	int MAX_RETRIES = 4;
810 
811 	do {
812 		if (server->ops->close_getattr)
813 			rc = server->ops->close_getattr(0, tcon, cifs_file);
814 		else if (server->ops->close)
815 			rc = server->ops->close(0, tcon, &cifs_file->fid);
816 
817 		if (rc == -EBUSY || rc == -EAGAIN) {
818 			retries++;
819 			msleep(250);
820 		}
821 	} while ((rc == -EBUSY || rc == -EAGAIN) && (retries < MAX_RETRIES)
822 	);
823 
824 	if (retries == MAX_RETRIES)
825 		pr_warn("Serverclose failed %d times, giving up\n", MAX_RETRIES);
826 
827 	if (cifs_file->offload)
828 		queue_work(fileinfo_put_wq, &cifs_file->put);
829 	else
830 		cifsFileInfo_put_final(cifs_file);
831 }
832 
/**
 * cifsFileInfo_put - release a reference of file priv data
 *
 * Always potentially wait for oplock handler. See _cifsFileInfo_put().
 *
 * @cifs_file:	cifs/smb3 specific info (eg refcounts) for an open file
 */
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
	/* wait_oplock_handler and offload both default to true here */
	_cifsFileInfo_put(cifs_file, true, true);
}
844 
845 /**
846  * _cifsFileInfo_put - release a reference of file priv data
847  *
848  * This may involve closing the filehandle @cifs_file out on the
849  * server. Must be called without holding tcon->open_file_lock,
850  * cinode->open_file_lock and cifs_file->file_info_lock.
851  *
852  * If @wait_for_oplock_handler is true and we are releasing the last
853  * reference, wait for any running oplock break handler of the file
854  * and cancel any pending one.
855  *
856  * @cifs_file:	cifs/smb3 specific info (eg refcounts) for an open file
857  * @wait_oplock_handler: must be false if called from oplock_break_handler
858  * @offload:	not offloaded on close and oplock breaks
859  *
860  */
_cifsFileInfo_put(struct cifsFileInfo * cifs_file,bool wait_oplock_handler,bool offload)861 void _cifsFileInfo_put(struct cifsFileInfo *cifs_file,
862 		       bool wait_oplock_handler, bool offload)
863 {
864 	struct inode *inode = d_inode(cifs_file->dentry);
865 	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
866 	struct TCP_Server_Info *server = tcon->ses->server;
867 	struct cifsInodeInfo *cifsi = CIFS_I(inode);
868 	struct super_block *sb = inode->i_sb;
869 	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
870 	struct cifs_fid fid = {};
871 	struct cifs_pending_open open;
872 	bool oplock_break_cancelled;
873 	bool serverclose_offloaded = false;
874 
875 	spin_lock(&tcon->open_file_lock);
876 	spin_lock(&cifsi->open_file_lock);
877 	spin_lock(&cifs_file->file_info_lock);
878 
879 	cifs_file->offload = offload;
880 	if (--cifs_file->count > 0) {
881 		spin_unlock(&cifs_file->file_info_lock);
882 		spin_unlock(&cifsi->open_file_lock);
883 		spin_unlock(&tcon->open_file_lock);
884 		return;
885 	}
886 	spin_unlock(&cifs_file->file_info_lock);
887 
888 	if (server->ops->get_lease_key)
889 		server->ops->get_lease_key(inode, &fid);
890 
891 	/* store open in pending opens to make sure we don't miss lease break */
892 	cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);
893 
894 	/* remove it from the lists */
895 	list_del(&cifs_file->flist);
896 	list_del(&cifs_file->tlist);
897 	atomic_dec(&tcon->num_local_opens);
898 
899 	if (list_empty(&cifsi->openFileList)) {
900 		cifs_dbg(FYI, "closing last open instance for inode %p\n",
901 			 d_inode(cifs_file->dentry));
902 		/*
903 		 * In strict cache mode we need invalidate mapping on the last
904 		 * close  because it may cause a error when we open this file
905 		 * again and get at least level II oplock.
906 		 */
907 		if (cifs_sb_flags(cifs_sb) & CIFS_MOUNT_STRICT_IO)
908 			set_bit(CIFS_INO_INVALID_MAPPING, &cifsi->flags);
909 		cifs_set_oplock_level(cifsi, 0);
910 	}
911 
912 	spin_unlock(&cifsi->open_file_lock);
913 	spin_unlock(&tcon->open_file_lock);
914 
915 	oplock_break_cancelled = wait_oplock_handler ?
916 		cancel_work_sync(&cifs_file->oplock_break) : false;
917 
918 	if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
919 		struct TCP_Server_Info *server = tcon->ses->server;
920 		unsigned int xid;
921 		int rc = 0;
922 
923 		xid = get_xid();
924 		if (server->ops->close_getattr)
925 			rc = server->ops->close_getattr(xid, tcon, cifs_file);
926 		else if (server->ops->close)
927 			rc = server->ops->close(xid, tcon, &cifs_file->fid);
928 		_free_xid(xid);
929 
930 		if (rc == -EBUSY || rc == -EAGAIN) {
931 			// Server close failed, hence offloading it as an async op
932 			queue_work(serverclose_wq, &cifs_file->serverclose);
933 			serverclose_offloaded = true;
934 		}
935 	}
936 
937 	if (oplock_break_cancelled)
938 		cifs_done_oplock_break(cifsi);
939 
940 	cifs_del_pending_open(&open);
941 
942 	// if serverclose has been offloaded to wq (on failure), it will
943 	// handle offloading put as well. If serverclose not offloaded,
944 	// we need to handle offloading put here.
945 	if (!serverclose_offloaded) {
946 		if (offload)
947 			queue_work(fileinfo_put_wq, &cifs_file->put);
948 		else
949 			cifsFileInfo_put_final(cifs_file);
950 	}
951 }
952 
cifs_file_flush(const unsigned int xid,struct inode * inode,struct cifsFileInfo * cfile)953 int cifs_file_flush(const unsigned int xid, struct inode *inode,
954 		    struct cifsFileInfo *cfile)
955 {
956 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode);
957 	struct cifs_tcon *tcon;
958 	int rc;
959 
960 	if (cifs_sb_flags(cifs_sb) & CIFS_MOUNT_NOSSYNC)
961 		return 0;
962 
963 	if (cfile && (OPEN_FMODE(cfile->f_flags) & FMODE_WRITE)) {
964 		tcon = tlink_tcon(cfile->tlink);
965 		return tcon->ses->server->ops->flush(xid, tcon,
966 						     &cfile->fid);
967 	}
968 	rc = cifs_get_writable_file(CIFS_I(inode), FIND_ANY, &cfile);
969 	if (!rc) {
970 		tcon = tlink_tcon(cfile->tlink);
971 		rc = tcon->ses->server->ops->flush(xid, tcon, &cfile->fid);
972 		cifsFileInfo_put(cfile);
973 	} else if (rc == -EBADF) {
974 		rc = 0;
975 	}
976 	return rc;
977 }
978 
/*
 * Truncate a file to zero length as part of an open with O_TRUNC.
 *
 * Writes back and flushes any cached dirty data first so the server-side
 * size change cannot race with writeback, then sets the server file size
 * to 0 (when a writable handle is available) and mirrors the new size in
 * the local inode/netfs state.
 */
static int cifs_do_truncate(const unsigned int xid, struct dentry *dentry)
{
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(dentry));
	struct inode *inode = d_inode(dentry);
	struct cifsFileInfo *cfile = NULL;
	struct TCP_Server_Info *server;
	struct cifs_tcon *tcon;
	int rc;

	/* push dirty pages before asking the server to change the size */
	rc = filemap_write_and_wait(inode->i_mapping);
	if (is_interrupt_error(rc))
		return -ERESTARTSYS;
	mapping_set_error(inode->i_mapping, rc);

	/* prefer a writable handle owned by this fsuid, if one exists */
	cfile = find_writable_file(cinode, FIND_FSUID_ONLY);
	rc = cifs_file_flush(xid, inode, cfile);
	if (!rc) {
		if (cfile) {
			tcon = tlink_tcon(cfile->tlink);
			server = tcon->ses->server;
			rc = server->ops->set_file_size(xid, tcon,
							cfile, 0, false);
		}
		/*
		 * NOTE(review): with no writable handle the local size is
		 * still zeroed without a server call — presumably the server
		 * truncation happens via the subsequent open; confirm.
		 */
		if (!rc) {
			netfs_resize_file(&cinode->netfs, 0, true);
			cifs_setsize(inode, 0);
		}
	}
	if (cfile)
		cifsFileInfo_put(cfile);
	return rc;
}
1011 
cifs_open(struct inode * inode,struct file * file)1012 int cifs_open(struct inode *inode, struct file *file)
1013 
1014 {
1015 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode);
1016 	struct cifs_open_info_data data = {};
1017 	struct cifsFileInfo *cfile = NULL;
1018 	struct TCP_Server_Info *server;
1019 	struct cifs_pending_open open;
1020 	bool posix_open_ok = false;
1021 	struct cifs_fid fid = {};
1022 	struct tcon_link *tlink;
1023 	struct cifs_tcon *tcon;
1024 	const char *full_path;
1025 	unsigned int sbflags;
1026 	int rc = -EACCES;
1027 	unsigned int xid;
1028 	__u32 oplock;
1029 	void *page;
1030 
1031 	xid = get_xid();
1032 
1033 	if (unlikely(cifs_forced_shutdown(cifs_sb))) {
1034 		free_xid(xid);
1035 		return smb_EIO(smb_eio_trace_forced_shutdown);
1036 	}
1037 
1038 	tlink = cifs_sb_tlink(cifs_sb);
1039 	if (IS_ERR(tlink)) {
1040 		free_xid(xid);
1041 		return PTR_ERR(tlink);
1042 	}
1043 	tcon = tlink_tcon(tlink);
1044 	server = tcon->ses->server;
1045 
1046 	page = alloc_dentry_path();
1047 	full_path = build_path_from_dentry(file_dentry(file), page);
1048 	if (IS_ERR(full_path)) {
1049 		rc = PTR_ERR(full_path);
1050 		goto out;
1051 	}
1052 
1053 	cifs_dbg(FYI, "inode = 0x%p file flags are 0x%x for %s\n",
1054 		 inode, file->f_flags, full_path);
1055 
1056 	sbflags = cifs_sb_flags(cifs_sb);
1057 	if ((file->f_flags & O_DIRECT) && (sbflags & CIFS_MOUNT_STRICT_IO)) {
1058 		if (sbflags & CIFS_MOUNT_NO_BRL)
1059 			file->f_op = &cifs_file_direct_nobrl_ops;
1060 		else
1061 			file->f_op = &cifs_file_direct_ops;
1062 	}
1063 
1064 	if (file->f_flags & O_TRUNC) {
1065 		rc = cifs_do_truncate(xid, file_dentry(file));
1066 		if (rc)
1067 			goto out;
1068 	}
1069 
1070 	/* Get the cached handle as SMB2 close is deferred */
1071 	if (OPEN_FMODE(file->f_flags) & FMODE_WRITE) {
1072 		rc = __cifs_get_writable_file(CIFS_I(inode),
1073 					      FIND_FSUID_ONLY |
1074 					      FIND_NO_PENDING_DELETE |
1075 					      FIND_OPEN_FLAGS,
1076 					      file->f_flags, &cfile);
1077 	} else {
1078 		cfile = __find_readable_file(CIFS_I(inode),
1079 					     FIND_NO_PENDING_DELETE |
1080 					     FIND_OPEN_FLAGS,
1081 					     file->f_flags);
1082 		rc = cfile ? 0 : -ENOENT;
1083 	}
1084 	if (rc == 0) {
1085 		file->private_data = cfile;
1086 		spin_lock(&CIFS_I(inode)->deferred_lock);
1087 		cifs_del_deferred_close(cfile);
1088 		spin_unlock(&CIFS_I(inode)->deferred_lock);
1089 		goto use_cache;
1090 	}
1091 	/* hard link on the deferred close file */
1092 	rc = cifs_get_hardlink_path(tcon, inode, file);
1093 	if (rc)
1094 		cifs_close_deferred_file(CIFS_I(inode));
1095 
1096 	if (server->oplocks)
1097 		oplock = REQ_OPLOCK;
1098 	else
1099 		oplock = 0;
1100 
1101 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1102 	if (!tcon->broken_posix_open && tcon->unix_ext &&
1103 	    cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
1104 				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
1105 		/* can not refresh inode info since size could be stale */
1106 		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
1107 				cifs_sb->ctx->file_mode /* ignored */,
1108 				file->f_flags, &oplock, &fid.netfid, xid);
1109 		if (rc == 0) {
1110 			cifs_dbg(FYI, "posix open succeeded\n");
1111 			posix_open_ok = true;
1112 		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
1113 			if (tcon->ses->serverNOS)
1114 				cifs_dbg(VFS, "server %s of type %s returned unexpected error on SMB posix open, disabling posix open support. Check if server update available.\n",
1115 					 tcon->ses->ip_addr,
1116 					 tcon->ses->serverNOS);
1117 			tcon->broken_posix_open = true;
1118 		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
1119 			 (rc != -EOPNOTSUPP)) /* path not found or net err */
1120 			goto out;
1121 		/*
1122 		 * Else fallthrough to retry open the old way on network i/o
1123 		 * or DFS errors.
1124 		 */
1125 	}
1126 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1127 
1128 	if (server->ops->get_lease_key)
1129 		server->ops->get_lease_key(inode, &fid);
1130 
1131 	cifs_add_pending_open(&fid, tlink, &open);
1132 
1133 	if (!posix_open_ok) {
1134 		if (server->ops->get_lease_key)
1135 			server->ops->get_lease_key(inode, &fid);
1136 
1137 		rc = cifs_nt_open(full_path, inode, cifs_sb, tcon, file->f_flags, &oplock, &fid,
1138 				  xid, &data);
1139 		if (rc) {
1140 			cifs_del_pending_open(&open);
1141 			goto out;
1142 		}
1143 	}
1144 
1145 	cfile = cifs_new_fileinfo(&fid, file, tlink, oplock, data.symlink_target);
1146 	if (cfile == NULL) {
1147 		if (server->ops->close)
1148 			server->ops->close(xid, tcon, &fid);
1149 		cifs_del_pending_open(&open);
1150 		rc = -ENOMEM;
1151 		goto out;
1152 	}
1153 
1154 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1155 	if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
1156 		/*
1157 		 * Time to set mode which we can not set earlier due to
1158 		 * problems creating new read-only files.
1159 		 */
1160 		struct cifs_unix_set_info_args args = {
1161 			.mode	= inode->i_mode,
1162 			.uid	= INVALID_UID, /* no change */
1163 			.gid	= INVALID_GID, /* no change */
1164 			.ctime	= NO_CHANGE_64,
1165 			.atime	= NO_CHANGE_64,
1166 			.mtime	= NO_CHANGE_64,
1167 			.device	= 0,
1168 		};
1169 		CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
1170 				       cfile->pid);
1171 	}
1172 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1173 
1174 use_cache:
1175 	fscache_use_cookie(cifs_inode_cookie(file_inode(file)),
1176 			   file->f_mode & FMODE_WRITE);
1177 	if (!(file->f_flags & O_DIRECT))
1178 		goto out;
1179 	if ((file->f_flags & (O_ACCMODE | O_APPEND)) == O_RDONLY)
1180 		goto out;
1181 	cifs_invalidate_cache(file_inode(file), FSCACHE_INVAL_DIO_WRITE);
1182 
1183 out:
1184 	free_dentry_path(page);
1185 	free_xid(xid);
1186 	cifs_put_tlink(tlink);
1187 	cifs_free_open_info(&data);
1188 	return rc;
1189 }
1190 
1191 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1192 static int cifs_push_posix_locks(struct cifsFileInfo *cfile);
1193 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1194 
1195 /*
1196  * Try to reacquire byte range locks that were released when session
1197  * to server was lost.
1198  */
1199 static int
cifs_relock_file(struct cifsFileInfo * cfile)1200 cifs_relock_file(struct cifsFileInfo *cfile)
1201 {
1202 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1203 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1204 	int rc = 0;
1205 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1206 	struct cifs_sb_info *cifs_sb = CIFS_SB(cinode);
1207 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1208 
1209 	down_read_nested(&cinode->lock_sem, SINGLE_DEPTH_NESTING);
1210 	if (cinode->can_cache_brlcks) {
1211 		/* can cache locks - no need to relock */
1212 		up_read(&cinode->lock_sem);
1213 		return rc;
1214 	}
1215 
1216 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1217 	if (cap_unix(tcon->ses) &&
1218 	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1219 	    ((cifs_sb_flags(cifs_sb) & CIFS_MOUNT_NOPOSIXBRL) == 0))
1220 		rc = cifs_push_posix_locks(cfile);
1221 	else
1222 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1223 		rc = tcon->ses->server->ops->push_mand_locks(cfile);
1224 
1225 	up_read(&cinode->lock_sem);
1226 	return rc;
1227 }
1228 
/*
 * Reopen a file handle that was invalidated (e.g. by reconnect or a
 * durable handle timeout). Tries a POSIX reopen first when unix
 * extensions are enabled, then falls back to a regular FILE_OPEN.
 * If @can_flush is set, dirty pages are written back and the inode is
 * refreshed from the server once the handle is valid again.
 */
static int
cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsInodeInfo *cinode;
	struct inode *inode;
	void *page;
	const char *full_path;
	int desired_access;
	int disposition = FILE_OPEN;
	int create_options = CREATE_NOT_DIR;
	struct cifs_open_parms oparms;
	int rdwr_for_fscache = 0;

	xid = get_xid();
	mutex_lock(&cfile->fh_mutex);
	/* another thread may have reopened the handle while we waited */
	if (!cfile->invalidHandle) {
		mutex_unlock(&cfile->fh_mutex);
		free_xid(xid);
		return 0;
	}

	inode = d_inode(cfile->dentry);
	cifs_sb = CIFS_SB(inode->i_sb);
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	/*
	 * Can not grab rename sem here because various ops, including those
	 * that already have the rename sem can end up causing writepage to get
	 * called and if the server was down that means we end up here, and we
	 * can never tell if the caller already has the rename_sem.
	 */
	page = alloc_dentry_path();
	full_path = build_path_from_dentry(cfile->dentry, page);
	if (IS_ERR(full_path)) {
		mutex_unlock(&cfile->fh_mutex);
		free_dentry_path(page);
		free_xid(xid);
		return PTR_ERR(full_path);
	}

	cifs_dbg(FYI, "inode = 0x%p file flags 0x%x for %s\n",
		 inode, cfile->f_flags, full_path);

	if (tcon->ses->server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	if (tcon->unix_ext && cap_unix(tcon->ses) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/*
		 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
		 * original open. Must mask them off for a reopen.
		 */
		unsigned int oflags = cfile->f_flags &
						~(O_CREAT | O_EXCL | O_TRUNC);

		rc = cifs_posix_open(full_path, NULL, inode->i_sb,
				     cifs_sb->ctx->file_mode /* ignored */,
				     oflags, &oplock, &cfile->fid.netfid, xid);
		if (rc == 0) {
			cifs_dbg(FYI, "posix reopen succeeded\n");
			oparms.reconnect = true;
			goto reopen_success;
		}
		/*
		 * fallthrough to retry open the old way on errors, especially
		 * in the reconnect path it is important to retry hard
		 */
	}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

	/* If we're caching, we need to be able to fill in around partial writes. */
	if (cifs_fscache_enabled(inode) && (cfile->f_flags & O_ACCMODE) == O_WRONLY)
		rdwr_for_fscache = 1;

	desired_access = cifs_convert_flags(cfile->f_flags, rdwr_for_fscache);
	create_options |= cifs_open_create_options(cfile->f_flags,
						   create_options);

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &cfile->fid);

retry_open:
	oparms = (struct cifs_open_parms) {
		.tcon = tcon,
		.cifs_sb = cifs_sb,
		.desired_access = desired_access,
		.create_options = cifs_create_options(cifs_sb, create_options),
		.disposition = disposition,
		.path = full_path,
		.fid = &cfile->fid,
		.reconnect = true,
	};

	/*
	 * Can not refresh inode by passing in file_info buf to be returned by
	 * ops->open and then calling get_inode_info with returned buf since
	 * file might have write behind data that needs to be flushed and server
	 * version of file size can be stale. If we knew for sure that inode was
	 * not dirty locally we could do this.
	 */
	rc = server->ops->open(xid, &oparms, &oplock, NULL);
	if (rc == -ENOENT && oparms.reconnect == false) {
		/* durable handle timeout is expired - open the file again */
		rc = server->ops->open(xid, &oparms, &oplock, NULL);
		/* indicate that we need to relock the file */
		oparms.reconnect = true;
	}
	if (rc == -EACCES && rdwr_for_fscache == 1) {
		/* retry without the extra read access requested for fscache */
		desired_access = cifs_convert_flags(cfile->f_flags, 0);
		rdwr_for_fscache = 2;
		goto retry_open;
	}

	if (rc) {
		mutex_unlock(&cfile->fh_mutex);
		cifs_dbg(FYI, "cifs_reopen returned 0x%x\n", rc);
		cifs_dbg(FYI, "oplock: %d\n", oplock);
		goto reopen_error_exit;
	}

	if (rdwr_for_fscache == 2)
		cifs_invalidate_cache(inode, FSCACHE_INVAL_DIO_WRITE);

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
reopen_success:
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
	cfile->invalidHandle = false;
	mutex_unlock(&cfile->fh_mutex);
	cinode = CIFS_I(inode);

	if (can_flush) {
		rc = filemap_write_and_wait(inode->i_mapping);
		if (!is_interrupt_error(rc))
			mapping_set_error(inode->i_mapping, rc);

		if (tcon->posix_extensions) {
			rc = smb311_posix_get_inode_info(&inode, full_path,
							 NULL, inode->i_sb, xid);
		} else if (tcon->unix_ext) {
			rc = cifs_get_inode_info_unix(&inode, full_path,
						      inode->i_sb, xid);
		} else {
			rc = cifs_get_inode_info(&inode, full_path, NULL,
						 inode->i_sb, xid, NULL);
		}
	}
	/*
	 * Else we are writing out data to server already and could deadlock if
	 * we tried to flush data, and since we do not know if we have data that
	 * would invalidate the current end of file on the server we can not go
	 * to the server to get the new inode info.
	 */

	/*
	 * If the server returned a read oplock and we have mandatory brlocks,
	 * set oplock level to None.
	 */
	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
		oplock = 0;
	}

	scoped_guard(spinlock, &cinode->open_file_lock)
		server->ops->set_fid(cfile, &cfile->fid, oplock);
	if (oparms.reconnect)
		cifs_relock_file(cfile);

reopen_error_exit:
	free_dentry_path(page);
	free_xid(xid);
	return rc;
}
1412 
smb2_deferred_work_close(struct work_struct * work)1413 void smb2_deferred_work_close(struct work_struct *work)
1414 {
1415 	struct cifsFileInfo *cfile = container_of(work,
1416 			struct cifsFileInfo, deferred.work);
1417 
1418 	spin_lock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
1419 	cifs_del_deferred_close(cfile);
1420 	cfile->deferred_close_scheduled = false;
1421 	spin_unlock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
1422 	_cifsFileInfo_put(cfile, true, false);
1423 }
1424 
1425 static bool
smb2_can_defer_close(struct inode * inode,struct cifs_deferred_close * dclose)1426 smb2_can_defer_close(struct inode *inode, struct cifs_deferred_close *dclose)
1427 {
1428 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
1429 	struct cifsInodeInfo *cinode = CIFS_I(inode);
1430 	unsigned int oplock = READ_ONCE(cinode->oplock);
1431 
1432 	return cifs_sb->ctx->closetimeo && cinode->lease_granted && dclose &&
1433 		(oplock == CIFS_CACHE_RHW_FLG || oplock == CIFS_CACHE_RH_FLG) &&
1434 		!test_bit(CIFS_INO_CLOSE_ON_LOCK, &cinode->flags);
1435 
1436 }
1437 
/*
 * ->release() for regular files.
 *
 * When the handle holds a read-caching lease (see smb2_can_defer_close)
 * the server close is deferred by ctx->closetimeo so that a quickly
 * following re-open can reuse the handle; otherwise the final reference
 * is dropped (closing the handle) immediately.
 */
int cifs_close(struct inode *inode, struct file *file)
{
	struct cifsFileInfo *cfile;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifs_deferred_close *dclose;

	cifs_fscache_unuse_inode_cookie(inode, file->f_mode & FMODE_WRITE);

	if (file->private_data != NULL) {
		cfile = file->private_data;
		file->private_data = NULL;
		/* allocation failure just disables deferral (dclose == NULL) */
		dclose = kmalloc_obj(struct cifs_deferred_close);
		if ((cfile->status_file_deleted == false) &&
		    (smb2_can_defer_close(inode, dclose))) {
			/* flush pending attribute changes into the inode times */
			if (test_and_clear_bit(NETFS_ICTX_MODIFIED_ATTR, &cinode->netfs.flags)) {
				inode_set_mtime_to_ts(inode,
						      inode_set_ctime_current(inode));
			}
			spin_lock(&cinode->deferred_lock);
			cifs_add_deferred_close(cfile, dclose);
			if (cfile->deferred_close_scheduled &&
			    delayed_work_pending(&cfile->deferred)) {
				/*
				 * If there is no pending work, mod_delayed_work queues new work.
				 * So, Increase the ref count to avoid use-after-free.
				 */
				if (!mod_delayed_work(deferredclose_wq,
						&cfile->deferred, cifs_sb->ctx->closetimeo))
					cifsFileInfo_get(cfile);
			} else {
				/* Deferred close for files */
				queue_delayed_work(deferredclose_wq,
						&cfile->deferred, cifs_sb->ctx->closetimeo);
				cfile->deferred_close_scheduled = true;
				spin_unlock(&cinode->deferred_lock);
				/* the work item now owns our reference */
				return 0;
			}
			spin_unlock(&cinode->deferred_lock);
			_cifsFileInfo_put(cfile, true, false);
		} else {
			_cifsFileInfo_put(cfile, true, false);
			kfree(dclose);
		}
	}

	/* return code from the ->release op is always ignored */
	return 0;
}
1487 
1488 void
cifs_reopen_persistent_handles(struct cifs_tcon * tcon)1489 cifs_reopen_persistent_handles(struct cifs_tcon *tcon)
1490 {
1491 	struct cifsFileInfo *open_file, *tmp;
1492 	LIST_HEAD(tmp_list);
1493 
1494 	if (!tcon->use_persistent || !tcon->need_reopen_files)
1495 		return;
1496 
1497 	tcon->need_reopen_files = false;
1498 
1499 	cifs_dbg(FYI, "Reopen persistent handles\n");
1500 
1501 	/* list all files open on tree connection, reopen resilient handles  */
1502 	spin_lock(&tcon->open_file_lock);
1503 	list_for_each_entry(open_file, &tcon->openFileList, tlist) {
1504 		if (!open_file->invalidHandle)
1505 			continue;
1506 		cifsFileInfo_get(open_file);
1507 		list_add_tail(&open_file->rlist, &tmp_list);
1508 	}
1509 	spin_unlock(&tcon->open_file_lock);
1510 
1511 	list_for_each_entry_safe(open_file, tmp, &tmp_list, rlist) {
1512 		if (cifs_reopen_file(open_file, false /* do not flush */))
1513 			tcon->need_reopen_files = true;
1514 		list_del_init(&open_file->rlist);
1515 		cifsFileInfo_put(open_file);
1516 	}
1517 }
1518 
/*
 * ->release() for directories: close any uncompleted server search
 * handle and free the readdir network buffer and private data.
 */
int cifs_closedir(struct inode *inode, struct file *file)
{
	int rc = 0;
	unsigned int xid;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	char *buf;

	cifs_dbg(FYI, "Closedir inode = 0x%p\n", inode);

	if (cfile == NULL)
		return rc;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	cifs_dbg(FYI, "Freeing private data in close dir\n");
	spin_lock(&cfile->file_info_lock);
	if (server->ops->dir_needs_close(cfile)) {
		/* mark invalid before dropping the lock, then close on server */
		cfile->invalidHandle = true;
		spin_unlock(&cfile->file_info_lock);
		if (server->ops->close_dir)
			rc = server->ops->close_dir(xid, tcon, &cfile->fid);
		else
			rc = -ENOSYS;
		cifs_dbg(FYI, "Closing uncompleted readdir with rc %d\n", rc);
		/* not much we can do if it fails anyway, ignore rc */
		rc = 0;
	} else
		spin_unlock(&cfile->file_info_lock);

	/* release the SMB response buffer held by the search state */
	buf = cfile->srch_inf.ntwrk_buf_start;
	if (buf) {
		cifs_dbg(FYI, "closedir free smb buf in srch struct\n");
		cfile->srch_inf.ntwrk_buf_start = NULL;
		if (cfile->srch_inf.smallBuf)
			cifs_small_buf_release(buf);
		else
			cifs_buf_release(buf);
	}

	cifs_put_tlink(cfile->tlink);
	kfree(file->private_data);
	file->private_data = NULL;
	/* BB can we lock the filestruct while this is going on? */
	free_xid(xid);
	return rc;
}
1569 
1570 static struct cifsLockInfo *
cifs_lock_init(__u64 offset,__u64 length,__u8 type,__u16 flags)1571 cifs_lock_init(__u64 offset, __u64 length, __u8 type, __u16 flags)
1572 {
1573 	struct cifsLockInfo *lock =
1574 		kmalloc_obj(struct cifsLockInfo);
1575 	if (!lock)
1576 		return lock;
1577 	lock->offset = offset;
1578 	lock->length = length;
1579 	lock->type = type;
1580 	lock->pid = current->tgid;
1581 	lock->flags = flags;
1582 	INIT_LIST_HEAD(&lock->blist);
1583 	init_waitqueue_head(&lock->block_q);
1584 	return lock;
1585 }
1586 
1587 void
cifs_del_lock_waiters(struct cifsLockInfo * lock)1588 cifs_del_lock_waiters(struct cifsLockInfo *lock)
1589 {
1590 	struct cifsLockInfo *li, *tmp;
1591 	list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
1592 		list_del_init(&li->blist);
1593 		wake_up(&li->block_q);
1594 	}
1595 }
1596 
/* rw_check argument values for the lock-conflict helpers below */
#define CIFS_LOCK_OP	0
#define CIFS_READ_OP	1
#define CIFS_WRITE_OP	2
1600 
/*
 * Check one handle's cached byte-range locks for a conflict with the
 * given range.
 *
 * @rw_check : 0 - no op, 1 - read, 2 - write (the CIFS_*_OP values)
 *
 * On conflict, stores the conflicting lock in *conf_lock (if non-NULL)
 * and returns true; returns false when the range may proceed.
 */
static bool
cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
			    __u64 length, __u8 type, __u16 flags,
			    struct cifsFileInfo *cfile,
			    struct cifsLockInfo **conf_lock, int rw_check)
{
	struct cifsLockInfo *li;
	struct cifsFileInfo *cur_cfile = fdlocks->cfile;
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;

	list_for_each_entry(li, &fdlocks->locks, llist) {
		/* skip locks that do not overlap the requested range */
		if (offset + length <= li->offset ||
		    offset >= li->offset + li->length)
			continue;
		if (rw_check != CIFS_LOCK_OP && current->tgid == li->pid &&
		    server->ops->compare_fids(cfile, cur_cfile)) {
			/* shared lock prevents write op through the same fid */
			if (!(li->type & server->vals->shared_lock_type) ||
			    rw_check != CIFS_WRITE_OP)
				continue;
		}
		/* shared locks coexist with same-owner or same-type locks */
		if ((type & server->vals->shared_lock_type) &&
		    ((server->ops->compare_fids(cfile, cur_cfile) &&
		     current->tgid == li->pid) || type == li->type))
			continue;
		/* OFD locks taken through the same fid do not conflict */
		if (rw_check == CIFS_LOCK_OP &&
		    (flags & FL_OFDLCK) && (li->flags & FL_OFDLCK) &&
		    server->ops->compare_fids(cfile, cur_cfile))
			continue;
		if (conf_lock)
			*conf_lock = li;
		return true;
	}
	return false;
}
1637 
1638 bool
cifs_find_lock_conflict(struct cifsFileInfo * cfile,__u64 offset,__u64 length,__u8 type,__u16 flags,struct cifsLockInfo ** conf_lock,int rw_check)1639 cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
1640 			__u8 type, __u16 flags,
1641 			struct cifsLockInfo **conf_lock, int rw_check)
1642 {
1643 	bool rc = false;
1644 	struct cifs_fid_locks *cur;
1645 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1646 
1647 	list_for_each_entry(cur, &cinode->llist, llist) {
1648 		rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
1649 						 flags, cfile, conf_lock,
1650 						 rw_check);
1651 		if (rc)
1652 			break;
1653 	}
1654 
1655 	return rc;
1656 }
1657 
/*
 * Check if there is another lock that prevents us to set the lock (mandatory
 * style). If such a lock exists, update the flock structure with its
 * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks
 * or leave it the same if we can't. Returns 0 if we don't need to request to
 * the server or 1 otherwise.
 */
static int
cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
	       __u8 type, struct file_lock *flock)
{
	int rc = 0;
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
	bool exist;

	down_read(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, offset, length, type,
					flock->c.flc_flags, &conf_lock,
					CIFS_LOCK_OP);
	if (exist) {
		/* report the conflicting lock back through @flock */
		flock->fl_start = conf_lock->offset;
		flock->fl_end = conf_lock->offset + conf_lock->length - 1;
		flock->c.flc_pid = conf_lock->pid;
		if (conf_lock->type & server->vals->shared_lock_type)
			flock->c.flc_type = F_RDLCK;
		else
			flock->c.flc_type = F_WRLCK;
	} else if (!cinode->can_cache_brlcks)
		rc = 1;
	else
		flock->c.flc_type = F_UNLCK;

	up_read(&cinode->lock_sem);
	return rc;
}
1696 
1697 static void
cifs_lock_add(struct cifsFileInfo * cfile,struct cifsLockInfo * lock)1698 cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
1699 {
1700 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1701 	cifs_down_write(&cinode->lock_sem);
1702 	list_add_tail(&lock->llist, &cfile->llist->locks);
1703 	up_write(&cinode->lock_sem);
1704 }
1705 
/*
 * Set the byte-range lock (mandatory style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if no locks prevent us but we need to request to the server;
 * 3) -EACCES, if there is a lock that prevents us and wait is false.
 */
static int
cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
		 bool wait)
{
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	bool exist;
	int rc = 0;

try_again:
	exist = false;
	cifs_down_write(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
					lock->type, lock->flags, &conf_lock,
					CIFS_LOCK_OP);
	if (!exist && cinode->can_cache_brlcks) {
		/* no conflict and we may cache brlocks: record locally only */
		list_add_tail(&lock->llist, &cfile->llist->locks);
		up_write(&cinode->lock_sem);
		return rc;
	}

	if (!exist)
		rc = 1;
	else if (!wait)
		rc = -EACCES;
	else {
		/* queue on the conflicting lock's block list and wait */
		list_add_tail(&lock->blist, &conf_lock->blist);
		up_write(&cinode->lock_sem);
		/*
		 * Woken when our blist entry becomes empty, i.e. when
		 * cifs_del_lock_waiters() unlinks us.
		 */
		rc = wait_event_interruptible(lock->block_q,
					(lock->blist.prev == &lock->blist) &&
					(lock->blist.next == &lock->blist));
		if (!rc)
			goto try_again;
		/* interrupted: remove ourselves from the waiter list */
		cifs_down_write(&cinode->lock_sem);
		list_del_init(&lock->blist);
	}

	up_write(&cinode->lock_sem);
	return rc;
}
1753 
1754 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1755 /*
1756  * Check if there is another lock that prevents us to set the lock (posix
1757  * style). If such a lock exists, update the flock structure with its
1758  * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks
1759  * or leave it the same if we can't. Returns 0 if we don't need to request to
1760  * the server or 1 otherwise.
1761  */
1762 static int
cifs_posix_lock_test(struct file * file,struct file_lock * flock)1763 cifs_posix_lock_test(struct file *file, struct file_lock *flock)
1764 {
1765 	int rc = 0;
1766 	struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
1767 	unsigned char saved_type = flock->c.flc_type;
1768 
1769 	if ((flock->c.flc_flags & FL_POSIX) == 0)
1770 		return 1;
1771 
1772 	down_read(&cinode->lock_sem);
1773 	posix_test_lock(file, flock);
1774 
1775 	if (lock_is_unlock(flock) && !cinode->can_cache_brlcks) {
1776 		flock->c.flc_type = saved_type;
1777 		rc = 1;
1778 	}
1779 
1780 	up_read(&cinode->lock_sem);
1781 	return rc;
1782 }
1783 
1784 /*
1785  * Set the byte-range lock (posix style). Returns:
1786  * 1) <0, if the error occurs while setting the lock;
1787  * 2) 0, if we set the lock and don't need to request to the server;
1788  * 3) FILE_LOCK_DEFERRED, if we will wait for some other file_lock;
1789  * 4) FILE_LOCK_DEFERRED + 1, if we need to request to the server.
1790  */
1791 static int
cifs_posix_lock_set(struct file * file,struct file_lock * flock)1792 cifs_posix_lock_set(struct file *file, struct file_lock *flock)
1793 {
1794 	struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
1795 	int rc = FILE_LOCK_DEFERRED + 1;
1796 
1797 	if ((flock->c.flc_flags & FL_POSIX) == 0)
1798 		return rc;
1799 
1800 	cifs_down_write(&cinode->lock_sem);
1801 	if (!cinode->can_cache_brlcks) {
1802 		up_write(&cinode->lock_sem);
1803 		return rc;
1804 	}
1805 
1806 	rc = posix_lock_file(file, flock, NULL);
1807 	up_write(&cinode->lock_sem);
1808 	return rc;
1809 }
1810 
/*
 * Push all cached byte-range locks on @cfile to the server via
 * LOCKING_ANDX requests, batching as many ranges per request as the
 * server's maximum buffer size allows. Returns 0, or the last error
 * returned by cifs_lockv() if any batch failed.
 */
int
cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
{
	unsigned int xid;
	int rc = 0, stored_rc;
	struct cifsLockInfo *li, *tmp;
	struct cifs_tcon *tcon;
	unsigned int num, max_num, max_buf;
	LOCKING_ANDX_RANGE *buf, *cur;
	static const int types[] = {
		LOCKING_ANDX_LARGE_FILES,
		LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
	};
	int i;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);

	/*
	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
	 * and check it before using.
	 */
	max_buf = tcon->ses->server->maxBuf;
	if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE))) {
		free_xid(xid);
		return -EINVAL;
	}

	BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
		     PAGE_SIZE);
	max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
			PAGE_SIZE);
	/* number of LOCKING_ANDX_RANGE entries that fit in one request */
	max_num = (max_buf - sizeof(struct smb_hdr)) /
						sizeof(LOCKING_ANDX_RANGE);
	buf = kzalloc_objs(LOCKING_ANDX_RANGE, max_num);
	if (!buf) {
		free_xid(xid);
		return -ENOMEM;
	}

	/* one pass for exclusive locks, one for shared locks */
	for (i = 0; i < 2; i++) {
		cur = buf;
		num = 0;
		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
			if (li->type != types[i])
				continue;
			cur->Pid = cpu_to_le16(li->pid);
			cur->LengthLow = cpu_to_le32((u32)li->length);
			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
			cur->OffsetLow = cpu_to_le32((u32)li->offset);
			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
			if (++num == max_num) {
				/* batch full: send it and start a new one */
				stored_rc = cifs_lockv(xid, tcon,
						       cfile->fid.netfid,
						       (__u8)li->type, 0, num,
						       buf);
				if (stored_rc)
					rc = stored_rc;
				cur = buf;
				num = 0;
			} else
				cur++;
		}

		if (num) {
			/* flush the final, partially filled batch */
			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
					       (__u8)types[i], 0, num, buf);
			if (stored_rc)
				rc = stored_rc;
		}
	}

	kfree(buf);
	free_xid(xid);
	return rc;
}
1887 
1888 static __u32
hash_lockowner(fl_owner_t owner)1889 hash_lockowner(fl_owner_t owner)
1890 {
1891 	return cifs_lock_secret ^ hash32_ptr((const void *)owner);
1892 }
1893 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1894 
/*
 * Snapshot of one cached POSIX byte-range lock, filled in while holding
 * flc_lock and pushed to the server after the spinlock is dropped (see
 * cifs_push_posix_locks()).
 */
struct lock_to_push {
	struct list_head llist;	/* entry in the local locks_to_send list */
	__u64 offset;		/* start offset of the locked range */
	__u64 length;		/* length of the locked range */
	__u32 pid;		/* hashed lock owner, sent as the pid */
	__u16 netfid;		/* SMB file id the lock applies to */
	__u8 type;		/* CIFS_RDLCK or CIFS_WRLCK */
};
1903 
1904 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1905 static int
cifs_push_posix_locks(struct cifsFileInfo * cfile)1906 cifs_push_posix_locks(struct cifsFileInfo *cfile)
1907 {
1908 	struct inode *inode = d_inode(cfile->dentry);
1909 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1910 	struct file_lock *flock;
1911 	struct file_lock_context *flctx = locks_inode_context(inode);
1912 	unsigned int count = 0, i;
1913 	int rc = 0, xid, type;
1914 	struct list_head locks_to_send, *el;
1915 	struct lock_to_push *lck, *tmp;
1916 	__u64 length;
1917 
1918 	xid = get_xid();
1919 
1920 	if (!flctx)
1921 		goto out;
1922 
1923 	spin_lock(&flctx->flc_lock);
1924 	list_for_each(el, &flctx->flc_posix) {
1925 		count++;
1926 	}
1927 	spin_unlock(&flctx->flc_lock);
1928 
1929 	INIT_LIST_HEAD(&locks_to_send);
1930 
1931 	/*
1932 	 * Allocating count locks is enough because no FL_POSIX locks can be
1933 	 * added to the list while we are holding cinode->lock_sem that
1934 	 * protects locking operations of this inode.
1935 	 */
1936 	for (i = 0; i < count; i++) {
1937 		lck = kmalloc_obj(struct lock_to_push);
1938 		if (!lck) {
1939 			rc = -ENOMEM;
1940 			goto err_out;
1941 		}
1942 		list_add_tail(&lck->llist, &locks_to_send);
1943 	}
1944 
1945 	el = locks_to_send.next;
1946 	spin_lock(&flctx->flc_lock);
1947 	for_each_file_lock(flock, &flctx->flc_posix) {
1948 		unsigned char ftype = flock->c.flc_type;
1949 
1950 		if (el == &locks_to_send) {
1951 			/*
1952 			 * The list ended. We don't have enough allocated
1953 			 * structures - something is really wrong.
1954 			 */
1955 			cifs_dbg(VFS, "Can't push all brlocks!\n");
1956 			break;
1957 		}
1958 		length = cifs_flock_len(flock);
1959 		if (ftype == F_RDLCK || ftype == F_SHLCK)
1960 			type = CIFS_RDLCK;
1961 		else
1962 			type = CIFS_WRLCK;
1963 		lck = list_entry(el, struct lock_to_push, llist);
1964 		lck->pid = hash_lockowner(flock->c.flc_owner);
1965 		lck->netfid = cfile->fid.netfid;
1966 		lck->length = length;
1967 		lck->type = type;
1968 		lck->offset = flock->fl_start;
1969 	}
1970 	spin_unlock(&flctx->flc_lock);
1971 
1972 	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
1973 		int stored_rc;
1974 
1975 		stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
1976 					     lck->offset, lck->length, NULL,
1977 					     lck->type, 0);
1978 		if (stored_rc)
1979 			rc = stored_rc;
1980 		list_del(&lck->llist);
1981 		kfree(lck);
1982 	}
1983 
1984 out:
1985 	free_xid(xid);
1986 	return rc;
1987 err_out:
1988 	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
1989 		list_del(&lck->llist);
1990 		kfree(lck);
1991 	}
1992 	goto out;
1993 }
1994 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1995 
/*
 * Push all locally-cached byte-range locks on @cfile to the server and
 * disable further local caching of brlocks for this inode.
 */
static int
cifs_push_locks(struct cifsFileInfo *cfile)
{
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = 0;
#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	struct cifs_sb_info *cifs_sb = CIFS_SB(cinode);
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

	/* we are going to update can_cache_brlcks here - need a write access */
	cifs_down_write(&cinode->lock_sem);
	if (!cinode->can_cache_brlcks) {
		up_write(&cinode->lock_sem);
		return rc;
	}

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	/* Unix extensions allow POSIX-style lock pushing unless the mount
	 * disabled POSIX brlocks */
	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb_flags(cifs_sb) & CIFS_MOUNT_NOPOSIXBRL) == 0))
		rc = cifs_push_posix_locks(cfile);
	else
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
		rc = tcon->ses->server->ops->push_mand_locks(cfile);

	cinode->can_cache_brlcks = false;
	up_write(&cinode->lock_sem);
	return rc;
}
2026 
/*
 * Decode a VFS file_lock into CIFS terms: *type receives the wire lock
 * type for this server, *lock/*unlock indicate which operation was
 * requested, and *wait_flag is set when the caller may block (FL_SLEEP).
 */
static void
cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
		bool *wait_flag, struct TCP_Server_Info *server)
{
	if (flock->c.flc_flags & FL_POSIX)
		cifs_dbg(FYI, "Posix\n");
	if (flock->c.flc_flags & FL_FLOCK)
		cifs_dbg(FYI, "Flock\n");
	if (flock->c.flc_flags & FL_SLEEP) {
		cifs_dbg(FYI, "Blocking lock\n");
		*wait_flag = true;
	}
	if (flock->c.flc_flags & FL_ACCESS)
		cifs_dbg(FYI, "Process suspended by mandatory locking - not implemented yet\n");
	if (flock->c.flc_flags & FL_LEASE)
		cifs_dbg(FYI, "Lease on file - not implemented yet\n");
	/* warn about any flag bits we do not recognize */
	if (flock->c.flc_flags &
	    (~(FL_POSIX | FL_FLOCK | FL_SLEEP |
	       FL_ACCESS | FL_LEASE | FL_CLOSE | FL_OFDLCK)))
		cifs_dbg(FYI, "Unknown lock flags 0x%x\n",
		         flock->c.flc_flags);

	/* map the VFS lock type onto the server's lock-type bits */
	*type = server->vals->large_lock_type;
	if (lock_is_write(flock)) {
		cifs_dbg(FYI, "F_WRLCK\n");
		*type |= server->vals->exclusive_lock_type;
		*lock = 1;
	} else if (lock_is_unlock(flock)) {
		cifs_dbg(FYI, "F_UNLCK\n");
		*type |= server->vals->unlock_lock_type;
		*unlock = 1;
		/* Check if unlock includes more than one lock range */
	} else if (lock_is_read(flock)) {
		cifs_dbg(FYI, "F_RDLCK\n");
		*type |= server->vals->shared_lock_type;
		*lock = 1;
	} else if (flock->c.flc_type == F_EXLCK) {
		cifs_dbg(FYI, "F_EXLCK\n");
		*type |= server->vals->exclusive_lock_type;
		*lock = 1;
	} else if (flock->c.flc_type == F_SHLCK) {
		cifs_dbg(FYI, "F_SHLCK\n");
		*type |= server->vals->shared_lock_type;
		*lock = 1;
	} else
		cifs_dbg(FYI, "Unknown type of lock\n");
}
2074 
/*
 * Handle an F_GETLK-style query: report in @flock whether the requested
 * range could be locked.  Without POSIX lock support this has to probe
 * the server by briefly acquiring and then releasing a lock.
 */
static int
cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
	   bool wait_flag, bool posix_lck, unsigned int xid)
{
	int rc = 0;
	__u64 length = cifs_flock_len(flock);
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	__u16 netfid = cfile->fid.netfid;

	if (posix_lck) {
		int posix_lock_type;

		/* check the local VFS lock state first */
		rc = cifs_posix_lock_test(file, flock);
		if (!rc)
			return rc;

		if (type & server->vals->shared_lock_type)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;
		rc = CIFSSMBPosixLock(xid, tcon, netfid,
				      hash_lockowner(flock->c.flc_owner),
				      flock->fl_start, length, flock,
				      posix_lock_type, wait_flag);
		return rc;
	}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

	/* first consult our own cached lock list */
	rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
	if (!rc)
		return rc;

	/* BB we could chain these into one lock request BB */
	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type,
				    1, 0, false);
	if (rc == 0) {
		/* probe succeeded: range is free - release it and report
		 * F_UNLCK to the caller */
		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
					    type, 0, 1, false);
		flock->c.flc_type = F_UNLCK;
		if (rc != 0)
			cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
				 rc);
		return 0;
	}

	/* a shared probe failed, so something conflicting (exclusive) exists */
	if (type & server->vals->shared_lock_type) {
		flock->c.flc_type = F_WRLCK;
		return 0;
	}

	type &= ~server->vals->exclusive_lock_type;

	/* exclusive probe failed - see whether a shared lock would fit,
	 * which tells us the conflicting lock is a read lock */
	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
				    type | server->vals->shared_lock_type,
				    1, 0, false);
	if (rc == 0) {
		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
			type | server->vals->shared_lock_type, 0, 1, false);
		flock->c.flc_type = F_RDLCK;
		if (rc != 0)
			cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
				 rc);
	} else
		flock->c.flc_type = F_WRLCK;

	return 0;
}
2145 
2146 void
cifs_move_llist(struct list_head * source,struct list_head * dest)2147 cifs_move_llist(struct list_head *source, struct list_head *dest)
2148 {
2149 	struct list_head *li, *tmp;
2150 	list_for_each_safe(li, tmp, source)
2151 		list_move(li, dest);
2152 }
2153 
2154 int
cifs_get_hardlink_path(struct cifs_tcon * tcon,struct inode * inode,struct file * file)2155 cifs_get_hardlink_path(struct cifs_tcon *tcon, struct inode *inode,
2156 				struct file *file)
2157 {
2158 	struct cifsFileInfo *open_file = NULL;
2159 	struct cifsInodeInfo *cinode = CIFS_I(inode);
2160 	int rc = 0;
2161 
2162 	spin_lock(&tcon->open_file_lock);
2163 	spin_lock(&cinode->open_file_lock);
2164 
2165 	list_for_each_entry(open_file, &cinode->openFileList, flist) {
2166 		if (file->f_flags == open_file->f_flags) {
2167 			rc = -EINVAL;
2168 			break;
2169 		}
2170 	}
2171 
2172 	spin_unlock(&cinode->open_file_lock);
2173 	spin_unlock(&tcon->open_file_lock);
2174 	return rc;
2175 }
2176 
2177 void
cifs_free_llist(struct list_head * llist)2178 cifs_free_llist(struct list_head *llist)
2179 {
2180 	struct cifsLockInfo *li, *tmp;
2181 	list_for_each_entry_safe(li, tmp, llist, llist) {
2182 		cifs_del_lock_waiters(li);
2183 		list_del(&li->llist);
2184 		kfree(li);
2185 	}
2186 }
2187 
2188 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
/*
 * Unlock the byte range described by @flock on @cfile: remove matching
 * entries from the file's lock list and, unless brlocks are still being
 * cached locally, batch the corresponding LOCKING_ANDX unlock ranges to
 * the server.  Removed locks are parked on a temporary list so they can
 * be restored if the server rejects the unlock.
 */
int
cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
		  unsigned int xid)
{
	int rc = 0, stored_rc;
	/* one pass per wire lock type: exclusive, then shared */
	static const int types[] = {
		LOCKING_ANDX_LARGE_FILES,
		LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
	};
	unsigned int i;
	unsigned int max_num, num, max_buf;
	LOCKING_ANDX_RANGE *buf, *cur;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct cifsLockInfo *li, *tmp;
	__u64 length = cifs_flock_len(flock);
	LIST_HEAD(tmp_llist);

	/*
	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
	 * and check it before using.
	 */
	max_buf = tcon->ses->server->maxBuf;
	if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE)))
		return -EINVAL;

	BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
		     PAGE_SIZE);
	max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
			PAGE_SIZE);
	/* number of ranges that fit in one request */
	max_num = (max_buf - sizeof(struct smb_hdr)) /
						sizeof(LOCKING_ANDX_RANGE);
	buf = kzalloc_objs(LOCKING_ANDX_RANGE, max_num);
	if (!buf)
		return -ENOMEM;

	cifs_down_write(&cinode->lock_sem);
	for (i = 0; i < 2; i++) {
		cur = buf;
		num = 0;
		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
			/* only locks fully contained in the unlock range */
			if (flock->fl_start > li->offset ||
			    (flock->fl_start + length) <
			    (li->offset + li->length))
				continue;
			if (current->tgid != li->pid)
				continue;
			if (types[i] != li->type)
				continue;
			if (cinode->can_cache_brlcks) {
				/*
				 * We can cache brlock requests - simply remove
				 * a lock from the file's list.
				 */
				list_del(&li->llist);
				cifs_del_lock_waiters(li);
				kfree(li);
				continue;
			}
			cur->Pid = cpu_to_le16(li->pid);
			cur->LengthLow = cpu_to_le32((u32)li->length);
			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
			cur->OffsetLow = cpu_to_le32((u32)li->offset);
			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
			/*
			 * We need to save a lock here to let us add it again to
			 * the file's list if the unlock range request fails on
			 * the server.
			 */
			list_move(&li->llist, &tmp_llist);
			if (++num == max_num) {
				/* batch full - send the unlock request */
				stored_rc = cifs_lockv(xid, tcon,
						       cfile->fid.netfid,
						       li->type, num, 0, buf);
				if (stored_rc) {
					/*
					 * We failed on the unlock range
					 * request - add all locks from the tmp
					 * list to the head of the file's list.
					 */
					cifs_move_llist(&tmp_llist,
							&cfile->llist->locks);
					rc = stored_rc;
				} else
					/*
					 * The unlock range request succeed -
					 * free the tmp list.
					 */
					cifs_free_llist(&tmp_llist);
				cur = buf;
				num = 0;
			} else
				cur++;
		}
		/* flush any partial final batch for this type */
		if (num) {
			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
					       types[i], num, 0, buf);
			if (stored_rc) {
				cifs_move_llist(&tmp_llist,
						&cfile->llist->locks);
				rc = stored_rc;
			} else
				cifs_free_llist(&tmp_llist);
		}
	}

	up_write(&cinode->lock_sem);
	kfree(buf);
	return rc;
}
2299 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
2300 
/*
 * Apply a lock or unlock request to the server (POSIX-style when
 * @posix_lck), update the file's local lock list, and mirror the result
 * into the VFS lock state for FL_POSIX/FL_FLOCK requests.
 */
static int
cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
	   bool wait_flag, bool posix_lck, int lock, int unlock,
	   unsigned int xid)
{
	int rc = 0;
	__u64 length = cifs_flock_len(flock);
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	struct inode *inode = d_inode(cfile->dentry);

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	if (posix_lck) {
		int posix_lock_type;

		/* if the lock could be cached locally we are done */
		rc = cifs_posix_lock_set(file, flock);
		if (rc <= FILE_LOCK_DEFERRED)
			return rc;

		if (type & server->vals->shared_lock_type)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;

		if (unlock == 1)
			posix_lock_type = CIFS_UNLCK;

		rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
				      hash_lockowner(flock->c.flc_owner),
				      flock->fl_start, length,
				      NULL, posix_lock_type, wait_flag);
		goto out;
	}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
	if (lock) {
		/* NOTE: this local shadows the int 'lock' parameter above */
		struct cifsLockInfo *lock;

		lock = cifs_lock_init(flock->fl_start, length, type,
				      flock->c.flc_flags);
		if (!lock)
			return -ENOMEM;

		/* adds the lock locally if there is no conflict; rc == 0
		 * means it was handled (cached) without going to the server */
		rc = cifs_lock_add_if(cfile, lock, wait_flag);
		if (rc < 0) {
			kfree(lock);
			return rc;
		}
		if (!rc)
			goto out;

		/*
		 * Windows 7 server can delay breaking lease from read to None
		 * if we set a byte-range lock on a file - break it explicitly
		 * before sending the lock to the server to be sure the next
		 * read won't conflict with non-overlapted locks due to
		 * pagereading.
		 */
		if (!CIFS_CACHE_WRITE(CIFS_I(inode)) &&
					CIFS_CACHE_READ(CIFS_I(inode))) {
			cifs_zap_mapping(inode);
			cifs_dbg(FYI, "Set no oplock for inode=%p due to mand locks\n",
				 inode);
			cifs_reset_oplock(CIFS_I(inode));
		}

		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
					    type, 1, 0, wait_flag);
		if (rc) {
			kfree(lock);
			return rc;
		}

		/* server accepted it - record the lock in the file's list */
		cifs_lock_add(cfile, lock);
	} else if (unlock)
		rc = server->ops->mand_unlock_range(cfile, flock, xid);

out:
	if ((flock->c.flc_flags & FL_POSIX) || (flock->c.flc_flags & FL_FLOCK)) {
		/*
		 * If this is a request to remove all locks because we
		 * are closing the file, it doesn't matter if the
		 * unlocking failed as both cifs.ko and the SMB server
		 * remove the lock on file close
		 */
		if (rc) {
			cifs_dbg(VFS, "%s failed rc=%d\n", __func__, rc);
			if (!(flock->c.flc_flags & FL_CLOSE))
				return rc;
		}
		/* keep the VFS lock state in sync with the server */
		rc = locks_lock_file_wait(file, flock);
	}
	return rc;
}
2395 
cifs_flock(struct file * file,int cmd,struct file_lock * fl)2396 int cifs_flock(struct file *file, int cmd, struct file_lock *fl)
2397 {
2398 	int rc, xid;
2399 	int lock = 0, unlock = 0;
2400 	bool wait_flag = false;
2401 	bool posix_lck = false;
2402 	struct cifs_sb_info *cifs_sb;
2403 	struct cifs_tcon *tcon;
2404 	struct cifsFileInfo *cfile;
2405 	__u32 type;
2406 
2407 	xid = get_xid();
2408 
2409 	if (!(fl->c.flc_flags & FL_FLOCK)) {
2410 		rc = -ENOLCK;
2411 		free_xid(xid);
2412 		return rc;
2413 	}
2414 
2415 	cfile = (struct cifsFileInfo *)file->private_data;
2416 	tcon = tlink_tcon(cfile->tlink);
2417 
2418 	cifs_read_flock(fl, &type, &lock, &unlock, &wait_flag,
2419 			tcon->ses->server);
2420 	cifs_sb = CIFS_SB(file);
2421 
2422 	if (cap_unix(tcon->ses) &&
2423 	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
2424 	    ((cifs_sb_flags(cifs_sb) & CIFS_MOUNT_NOPOSIXBRL) == 0))
2425 		posix_lck = true;
2426 
2427 	if (!lock && !unlock) {
2428 		/*
2429 		 * if no lock or unlock then nothing to do since we do not
2430 		 * know what it is
2431 		 */
2432 		rc = -EOPNOTSUPP;
2433 		free_xid(xid);
2434 		return rc;
2435 	}
2436 
2437 	rc = cifs_setlk(file, fl, type, wait_flag, posix_lck, lock, unlock,
2438 			xid);
2439 	free_xid(xid);
2440 	return rc;
2441 
2442 
2443 }
2444 
/* fcntl byte-range lock (F_GETLK/F_SETLK/F_SETLKW) entry point for the VFS. */
int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(file);
	struct cifsFileInfo *cfile;
	int lock = 0, unlock = 0;
	bool wait_flag = false;
	bool posix_lck = false;
	struct cifs_tcon *tcon;
	__u32 type;
	int rc, xid;

	rc = -EACCES;
	xid = get_xid();

	cifs_dbg(FYI, "%s: %pD2 cmd=0x%x type=0x%x flags=0x%x r=%lld:%lld\n", __func__, file, cmd,
		 flock->c.flc_flags, flock->c.flc_type,
		 (long long)flock->fl_start,
		 (long long)flock->fl_end);

	cfile = (struct cifsFileInfo *)file->private_data;
	tcon = tlink_tcon(cfile->tlink);

	/* decode the request into lock/unlock/wait and a wire lock type */
	cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
			tcon->ses->server);
	set_bit(CIFS_INO_CLOSE_ON_LOCK, &CIFS_I(d_inode(cfile->dentry))->flags);

	/* Unix extensions give POSIX lock semantics unless disabled on mount */
	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb_flags(cifs_sb) & CIFS_MOUNT_NOPOSIXBRL) == 0))
		posix_lck = true;
	/*
	 * BB add code here to normalize offset and length to account for
	 * negative length which we can not accept over the wire.
	 */
	if (IS_GETLK(cmd)) {
		rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
		free_xid(xid);
		return rc;
	}

	if (!lock && !unlock) {
		/*
		 * if no lock or unlock then nothing to do since we do not
		 * know what it is
		 */
		free_xid(xid);
		return -EOPNOTSUPP;
	}

	rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
			xid);
	free_xid(xid);
	return rc;
}
2499 
/*
 * Completion handler for a write subrequest.  On success, advance the
 * netfs inode's zero_point (for unbuffered/direct writes) and cached
 * remote file size past the range just written, then pass the result on
 * to netfslib.
 */
void cifs_write_subrequest_terminated(struct cifs_io_subrequest *wdata, ssize_t result)
{
	struct netfs_io_request *wreq = wdata->rreq;
	struct netfs_inode *ictx = netfs_inode(wreq->inode);
	loff_t wrend;

	if (result > 0) {
		/* end of the region the server acknowledged */
		wrend = wdata->subreq.start + wdata->subreq.transferred + result;

		if (wrend > ictx->zero_point &&
		    (wdata->rreq->origin == NETFS_UNBUFFERED_WRITE ||
		     wdata->rreq->origin == NETFS_DIO_WRITE))
			ictx->zero_point = wrend;
		if (wrend > ictx->remote_i_size)
			netfs_resize_file(ictx, wrend, true);
	}

	netfs_write_subrequest_terminated(&wdata->subreq, result);
}
2519 
open_flags_match(struct cifsInodeInfo * cinode,unsigned int oflags,unsigned int cflags)2520 static bool open_flags_match(struct cifsInodeInfo *cinode,
2521 			     unsigned int oflags, unsigned int cflags)
2522 {
2523 	struct inode *inode = &cinode->netfs.inode;
2524 	int crw = 0, orw = 0;
2525 
2526 	oflags &= ~(O_CREAT | O_EXCL | O_TRUNC);
2527 	cflags &= ~(O_CREAT | O_EXCL | O_TRUNC);
2528 
2529 	if (cifs_fscache_enabled(inode)) {
2530 		if (OPEN_FMODE(cflags) & FMODE_WRITE)
2531 			crw = 1;
2532 		if (OPEN_FMODE(oflags) & FMODE_WRITE)
2533 			orw = 1;
2534 	}
2535 	if (cifs_convert_flags(oflags, orw) != cifs_convert_flags(cflags, crw))
2536 		return false;
2537 
2538 	return (oflags & (O_SYNC | O_DIRECT)) == (cflags & (O_SYNC | O_DIRECT));
2539 }
2540 
/*
 * Find an open handle on @cifs_inode usable for reading, subject to the
 * FIND_* constraints in @find_flags.  Returns the handle with a
 * reference taken, or NULL if no valid handle qualifies.
 */
struct cifsFileInfo *__find_readable_file(struct cifsInodeInfo *cifs_inode,
					  unsigned int find_flags,
					  unsigned int open_flags)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode);
	bool fsuid_only = find_flags & FIND_FSUID_ONLY;
	struct cifsFileInfo *open_file = NULL;

	/* only filter by fsuid on multiuser mounts */
	if (!(cifs_sb_flags(cifs_sb) & CIFS_MOUNT_MULTIUSER))
		fsuid_only = false;

	spin_lock(&cifs_inode->open_file_lock);
	/* we could simply get the first_list_entry since write-only entries
	   are always at the end of the list but since the first entry might
	   have a close pending, we go through the whole list */
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
			continue;
		/* skip handles whose file is already marked deleted */
		if ((find_flags & FIND_NO_PENDING_DELETE) &&
		    open_file->status_file_deleted)
			continue;
		/* optionally require compatible open flags */
		if ((find_flags & FIND_OPEN_FLAGS) &&
		    !open_flags_match(cifs_inode, open_flags,
				      open_file->f_flags))
			continue;
		if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
			if ((!open_file->invalidHandle)) {
				/* found a good file */
				/* lock it so it will not be closed on us */
				cifsFileInfo_get(open_file);
				spin_unlock(&cifs_inode->open_file_lock);
				return open_file;
			} /* else might as well continue, and look for
			     another, or simply have the caller reopen it
			     again rather than trying to fix this handle */
		} else /* write only file */
			break; /* write only files are last so must be done */
	}
	spin_unlock(&cifs_inode->open_file_lock);
	return NULL;
}
2583 
2584 /* Return -EBADF if no handle is found and general rc otherwise */
__cifs_get_writable_file(struct cifsInodeInfo * cifs_inode,unsigned int find_flags,unsigned int open_flags,struct cifsFileInfo ** ret_file)2585 int __cifs_get_writable_file(struct cifsInodeInfo *cifs_inode,
2586 			     unsigned int find_flags, unsigned int open_flags,
2587 			     struct cifsFileInfo **ret_file)
2588 {
2589 	struct cifsFileInfo *open_file, *inv_file = NULL;
2590 	bool fsuid_only, with_delete;
2591 	struct cifs_sb_info *cifs_sb;
2592 	bool any_available = false;
2593 	unsigned int refind = 0;
2594 	*ret_file = NULL;
2595 	int rc = -EBADF;
2596 
2597 	/*
2598 	 * Having a null inode here (because mapping->host was set to zero by
2599 	 * the VFS or MM) should not happen but we had reports of on oops (due
2600 	 * to it being zero) during stress testcases so we need to check for it
2601 	 */
2602 
2603 	if (cifs_inode == NULL) {
2604 		cifs_dbg(VFS, "Null inode passed to cifs_writeable_file\n");
2605 		dump_stack();
2606 		return rc;
2607 	}
2608 
2609 	if (test_bit(CIFS_INO_TMPFILE, &cifs_inode->flags))
2610 		find_flags = FIND_ANY;
2611 
2612 	cifs_sb = CIFS_SB(cifs_inode);
2613 
2614 	with_delete = find_flags & FIND_WITH_DELETE;
2615 	fsuid_only = find_flags & FIND_FSUID_ONLY;
2616 	/* only filter by fsuid on multiuser mounts */
2617 	if (!(cifs_sb_flags(cifs_sb) & CIFS_MOUNT_MULTIUSER))
2618 		fsuid_only = false;
2619 
2620 	spin_lock(&cifs_inode->open_file_lock);
2621 refind_writable:
2622 	if (refind > MAX_REOPEN_ATT) {
2623 		spin_unlock(&cifs_inode->open_file_lock);
2624 		return rc;
2625 	}
2626 	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
2627 		if (!any_available && open_file->pid != current->tgid)
2628 			continue;
2629 		if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
2630 			continue;
2631 		if (with_delete && !(open_file->fid.access & DELETE))
2632 			continue;
2633 		if ((find_flags & FIND_NO_PENDING_DELETE) &&
2634 		    open_file->status_file_deleted)
2635 			continue;
2636 		if ((find_flags & FIND_OPEN_FLAGS) &&
2637 		    !open_flags_match(cifs_inode, open_flags,
2638 				      open_file->f_flags))
2639 			continue;
2640 		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
2641 			if (!open_file->invalidHandle) {
2642 				/* found a good writable file */
2643 				cifsFileInfo_get(open_file);
2644 				spin_unlock(&cifs_inode->open_file_lock);
2645 				*ret_file = open_file;
2646 				return 0;
2647 			} else {
2648 				if (!inv_file)
2649 					inv_file = open_file;
2650 			}
2651 		}
2652 	}
2653 	/* couldn't find usable FH with same pid, try any available */
2654 	if (!any_available) {
2655 		any_available = true;
2656 		goto refind_writable;
2657 	}
2658 
2659 	if (inv_file) {
2660 		any_available = false;
2661 		cifsFileInfo_get(inv_file);
2662 	}
2663 
2664 	spin_unlock(&cifs_inode->open_file_lock);
2665 
2666 	if (inv_file) {
2667 		rc = cifs_reopen_file(inv_file, false);
2668 		if (!rc) {
2669 			*ret_file = inv_file;
2670 			return 0;
2671 		}
2672 
2673 		spin_lock(&cifs_inode->open_file_lock);
2674 		list_move_tail(&inv_file->flist, &cifs_inode->openFileList);
2675 		spin_unlock(&cifs_inode->open_file_lock);
2676 		cifsFileInfo_put(inv_file);
2677 		++refind;
2678 		inv_file = NULL;
2679 		spin_lock(&cifs_inode->open_file_lock);
2680 		goto refind_writable;
2681 	}
2682 
2683 	return rc;
2684 }
2685 
2686 struct cifsFileInfo *
find_writable_file(struct cifsInodeInfo * cifs_inode,int flags)2687 find_writable_file(struct cifsInodeInfo *cifs_inode, int flags)
2688 {
2689 	struct cifsFileInfo *cfile;
2690 	int rc;
2691 
2692 	rc = cifs_get_writable_file(cifs_inode, flags, &cfile);
2693 	if (rc)
2694 		cifs_dbg(FYI, "Couldn't find writable handle rc=%d\n", rc);
2695 
2696 	return cfile;
2697 }
2698 
/*
 * Find a writable open handle by path @name under @tcon; used when only
 * the name (not the inode) is known.  If @inode is supplied the lookup
 * goes straight through cifs_get_writable_file().  Returns 0 with
 * *ret_file set, or a negative error (-ENOENT if no open file matches).
 */
int cifs_get_writable_path(struct cifs_tcon *tcon, const char *name,
			   struct inode *inode, int flags,
			   struct cifsFileInfo **ret_file)
{
	struct cifsFileInfo *cfile;
	void *page;

	*ret_file = NULL;

	if (inode)
		return cifs_get_writable_file(CIFS_I(inode), flags, ret_file);

	page = alloc_dentry_path();
	spin_lock(&tcon->open_file_lock);
	list_for_each_entry(cfile, &tcon->openFileList, tlist) {
		struct cifsInodeInfo *cinode;
		/* rebuild the path for each open file and compare by name */
		const char *full_path = build_path_from_dentry(cfile->dentry, page);
		if (IS_ERR(full_path)) {
			spin_unlock(&tcon->open_file_lock);
			free_dentry_path(page);
			return PTR_ERR(full_path);
		}
		if (strcmp(full_path, name))
			continue;

		cinode = CIFS_I(d_inode(cfile->dentry));
		spin_unlock(&tcon->open_file_lock);
		free_dentry_path(page);
		return cifs_get_writable_file(cinode, flags, ret_file);
	}

	spin_unlock(&tcon->open_file_lock);
	free_dentry_path(page);
	return -ENOENT;
}
2734 
/*
 * Find a readable open handle by path @name under @tcon.  Returns 0 with
 * *ret_file set (reference taken), or -ENOENT if no open file matches
 * the name or none of its handles is readable.
 */
int
cifs_get_readable_path(struct cifs_tcon *tcon, const char *name,
		       struct cifsFileInfo **ret_file)
{
	struct cifsFileInfo *cfile;
	void *page = alloc_dentry_path();

	*ret_file = NULL;

	spin_lock(&tcon->open_file_lock);
	list_for_each_entry(cfile, &tcon->openFileList, tlist) {
		struct cifsInodeInfo *cinode;
		/* rebuild the path for each open file and compare by name */
		const char *full_path = build_path_from_dentry(cfile->dentry, page);
		if (IS_ERR(full_path)) {
			spin_unlock(&tcon->open_file_lock);
			free_dentry_path(page);
			return PTR_ERR(full_path);
		}
		if (strcmp(full_path, name))
			continue;

		cinode = CIFS_I(d_inode(cfile->dentry));
		spin_unlock(&tcon->open_file_lock);
		free_dentry_path(page);
		*ret_file = find_readable_file(cinode, FIND_ANY);
		return *ret_file ? 0 : -ENOENT;
	}

	spin_unlock(&tcon->open_file_lock);
	free_dentry_path(page);
	return -ENOENT;
}
2767 
2768 /*
2769  * Flush data on a strict file.
2770  */
int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
		      int datasync)
{
	struct cifsFileInfo *smbfile = file->private_data;
	struct inode *inode = file_inode(file);
	unsigned int xid;
	int rc;

	/* push dirty pages in the range and wait for them before flushing
	 * on the server */
	rc = file_write_and_wait_range(file, start, end);
	if (rc) {
		trace_cifs_fsync_err(inode->i_ino, rc);
		return rc;
	}

	cifs_dbg(FYI, "%s: name=%pD datasync=0x%x\n", __func__, file, datasync);

	/* without a read lease the pagecache may be stale - drop it */
	if (!CIFS_CACHE_READ(CIFS_I(inode))) {
		rc = cifs_zap_mapping(inode);
		cifs_dbg(FYI, "%s: invalidate mapping: rc = %d\n", __func__, rc);
	}

	xid = get_xid();
	rc = cifs_file_flush(xid, inode, smbfile);
	free_xid(xid);
	return rc;
}
2797 
2798 /*
2799  * Flush data on a non-strict data.
2800  */
int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	unsigned int xid;
	int rc = 0;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsFileInfo *smbfile = file->private_data;
	struct inode *inode = file_inode(file);
	struct cifs_sb_info *cifs_sb = CIFS_SB(file);

	/* write out and wait on dirty pages in the range first */
	rc = file_write_and_wait_range(file, start, end);
	if (rc) {
		trace_cifs_fsync_err(file_inode(file)->i_ino, rc);
		return rc;
	}

	xid = get_xid();

	cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
		 file, datasync);

	tcon = tlink_tcon(smbfile->tlink);
	/* skip the server-side flush entirely when nosserversync is set */
	if (!(cifs_sb_flags(cifs_sb) & CIFS_MOUNT_NOSSYNC)) {
		server = tcon->ses->server;
		if (server->ops->flush == NULL) {
			rc = -ENOSYS;
			goto fsync_exit;
		}

		/* this handle may not be writable - borrow one that is */
		if ((OPEN_FMODE(smbfile->f_flags) & FMODE_WRITE) == 0) {
			smbfile = find_writable_file(CIFS_I(inode), FIND_ANY);
			if (smbfile) {
				rc = server->ops->flush(xid, tcon, &smbfile->fid);
				cifsFileInfo_put(smbfile);
			} else
				cifs_dbg(FYI, "ignore fsync for file not open for write\n");
		} else
			rc = server->ops->flush(xid, tcon, &smbfile->fid);
	}

fsync_exit:
	free_xid(xid);
	return rc;
}
2845 
2846 /*
2847  * As file closes, flush all cached write data for this inode checking
2848  * for write behind errors.
2849  */
cifs_flush(struct file * file,fl_owner_t id)2850 int cifs_flush(struct file *file, fl_owner_t id)
2851 {
2852 	struct inode *inode = file_inode(file);
2853 	int rc = 0;
2854 
2855 	if (file->f_mode & FMODE_WRITE)
2856 		rc = filemap_write_and_wait(inode->i_mapping);
2857 
2858 	cifs_dbg(FYI, "Flush inode %p file %p rc %d\n", inode, file, rc);
2859 	if (rc) {
2860 		/* get more nuanced writeback errors */
2861 		rc = filemap_check_wb_err(file->f_mapping, 0);
2862 		trace_cifs_flush_err(inode->i_ino, rc);
2863 	}
2864 	return rc;
2865 }
2866 
/*
 * Buffered write used when mandatory byte-range locks may be in force.
 * Takes lock_sem shared so that no brlock can be added while we check for a
 * conflicting exclusive lock over the write range; the write itself is then
 * performed under the netfs write serialisation.
 */
static ssize_t
cifs_writev(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct inode *inode = file->f_mapping->host;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode);
	ssize_t rc;

	/* Serialise against other netfs I/O on this inode. */
	rc = netfs_start_io_write(inode);
	if (rc < 0)
		return rc;

	/*
	 * We need to hold the sem to be sure nobody modifies lock list
	 * with a brlock that prevents writing.
	 */
	down_read(&cinode->lock_sem);

	rc = generic_write_checks(iocb, from);
	if (rc <= 0)
		goto out;

	/*
	 * With POSIX brlock emulation disabled, a conflicting exclusive
	 * server-side lock over [ki_pos, ki_pos+count) must fail the write.
	 */
	if ((cifs_sb_flags(cifs_sb) & CIFS_MOUNT_NOPOSIXBRL) &&
	    (cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(from),
				     server->vals->exclusive_lock_type, 0,
				     NULL, CIFS_WRITE_OP))) {
		rc = -EACCES;
		goto out;
	}

	rc = netfs_buffered_write_iter_locked(iocb, from, NULL);

out:
	up_read(&cinode->lock_sem);
	netfs_end_io_write(inode);
	/* For O_(D)SYNC handles, flush what we just wrote. */
	if (rc > 0)
		rc = generic_write_sync(iocb, rc);
	return rc;
}
2909 
/*
 * Write path for strict cache mode.  Chooses between cached (buffered)
 * writes - allowed while we hold write caching rights - and uncached writes,
 * and invalidates read-cached data afterwards when we only hold a read
 * lease, so subsequent reads go to the server.
 */
ssize_t
cifs_strict_writev(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode);
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
						iocb->ki_filp->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	ssize_t written;

	/* Block oplock breaks from downgrading caching while we write. */
	written = cifs_get_writer(cinode);
	if (written)
		return written;

	if (CIFS_CACHE_WRITE(cinode)) {
		/*
		 * Unix extensions with POSIX brlock semantics need no
		 * mandatory-lock conflict check, so write straight through
		 * netfs; otherwise go via cifs_writev() which checks brlocks.
		 */
		if (cap_unix(tcon->ses) &&
		    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
		    ((cifs_sb_flags(cifs_sb) & CIFS_MOUNT_NOPOSIXBRL) == 0)) {
			written = netfs_file_write_iter(iocb, from);
			goto out;
		}
		written = cifs_writev(iocb, from);
		goto out;
	}
	/*
	 * For non-oplocked files in strict cache mode we need to write the data
	 * to the server exactly from the pos to pos+len-1 rather than flush all
	 * affected pages because it may cause a error with mandatory locks on
	 * these pages but not on the region from pos to ppos+len-1.
	 */
	written = netfs_file_write_iter(iocb, from);
	if (CIFS_CACHE_READ(cinode)) {
		/*
		 * We have read level caching and we have just sent a write
		 * request to the server thus making data in the cache stale.
		 * Zap the cache and set oplock/lease level to NONE to avoid
		 * reading stale data from the cache. All subsequent read
		 * operations will read new data from the server.
		 */
		cifs_zap_mapping(inode);
		cifs_dbg(FYI, "Set Oplock/Lease to NONE for inode=%p after write\n",
			 inode);
		cifs_reset_oplock(cinode);
	}
out:
	cifs_put_writer(cinode);
	return written;
}
2959 
cifs_loose_read_iter(struct kiocb * iocb,struct iov_iter * iter)2960 ssize_t cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter)
2961 {
2962 	ssize_t rc;
2963 	struct inode *inode = file_inode(iocb->ki_filp);
2964 
2965 	if (iocb->ki_flags & IOCB_DIRECT)
2966 		return netfs_unbuffered_read_iter(iocb, iter);
2967 
2968 	rc = cifs_revalidate_mapping(inode);
2969 	if (rc)
2970 		return rc;
2971 
2972 	return netfs_file_read_iter(iocb, iter);
2973 }
2974 
cifs_file_write_iter(struct kiocb * iocb,struct iov_iter * from)2975 ssize_t cifs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
2976 {
2977 	struct inode *inode = file_inode(iocb->ki_filp);
2978 	struct cifsInodeInfo *cinode = CIFS_I(inode);
2979 	ssize_t written;
2980 	int rc;
2981 
2982 	if (iocb->ki_filp->f_flags & O_DIRECT) {
2983 		written = netfs_unbuffered_write_iter(iocb, from);
2984 		if (written > 0 && CIFS_CACHE_READ(cinode)) {
2985 			cifs_zap_mapping(inode);
2986 			cifs_dbg(FYI,
2987 				 "Set no oplock for inode=%p after a write operation\n",
2988 				 inode);
2989 			cifs_reset_oplock(cinode);
2990 		}
2991 		return written;
2992 	}
2993 
2994 	written = cifs_get_writer(cinode);
2995 	if (written)
2996 		return written;
2997 
2998 	written = netfs_file_write_iter(iocb, from);
2999 
3000 	if (!CIFS_CACHE_WRITE(CIFS_I(inode))) {
3001 		rc = filemap_fdatawrite(inode->i_mapping);
3002 		if (rc)
3003 			cifs_dbg(FYI, "cifs_file_write_iter: %d rc on %p inode\n",
3004 				 rc, inode);
3005 	}
3006 
3007 	cifs_put_writer(cinode);
3008 	return written;
3009 }
3010 
/*
 * Read path for strict cache mode.  Reads from the server unless we hold at
 * least a read lease; when mandatory brlocks may exist, the lock list is
 * checked under lock_sem before reading.
 */
ssize_t
cifs_strict_readv(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode);
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
						iocb->ki_filp->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = -EACCES;

	/*
	 * In strict cache mode we need to read from the server all the time
	 * if we don't have level II oplock because the server can delay mtime
	 * change - so we can't make a decision about inode invalidating.
	 * And we can also fail with pagereading if there are mandatory locks
	 * on pages affected by this read but not on the region from pos to
	 * pos+len-1.
	 */
	if (!CIFS_CACHE_READ(cinode))
		return netfs_unbuffered_read_iter(iocb, to);

	/* POSIX brlock semantics: no mandatory-lock check is needed. */
	if ((cifs_sb_flags(cifs_sb) & CIFS_MOUNT_NOPOSIXBRL) == 0) {
		if (iocb->ki_flags & IOCB_DIRECT)
			return netfs_unbuffered_read_iter(iocb, to);
		return netfs_buffered_read_iter(iocb, to);
	}

	/*
	 * We need to hold the sem to be sure nobody modifies lock list
	 * with a brlock that prevents reading.
	 */
	if (iocb->ki_flags & IOCB_DIRECT) {
		rc = netfs_start_io_direct(inode);
		if (rc < 0)
			goto out;
		rc = -EACCES;
		down_read(&cinode->lock_sem);
		/* Fail with -EACCES if a shared lock conflict exists. */
		if (!cifs_find_lock_conflict(
			    cfile, iocb->ki_pos, iov_iter_count(to),
			    tcon->ses->server->vals->shared_lock_type,
			    0, NULL, CIFS_READ_OP))
			rc = netfs_unbuffered_read_iter_locked(iocb, to);
		up_read(&cinode->lock_sem);
		netfs_end_io_direct(inode);
	} else {
		rc = netfs_start_io_read(inode);
		if (rc < 0)
			goto out;
		rc = -EACCES;
		down_read(&cinode->lock_sem);
		if (!cifs_find_lock_conflict(
			    cfile, iocb->ki_pos, iov_iter_count(to),
			    tcon->ses->server->vals->shared_lock_type,
			    0, NULL, CIFS_READ_OP))
			rc = filemap_read(iocb, to, 0);
		up_read(&cinode->lock_sem);
		netfs_end_io_read(inode);
	}
out:
	return rc;
}
3073 
/* ->page_mkwrite handler: let netfslib make the faulted folio writable. */
static vm_fault_t cifs_page_mkwrite(struct vm_fault *vmf)
{
	return netfs_page_mkwrite(vmf, NULL);
}
3078 
/* VM operations for mmap'd cifs files; write faults go via cifs_page_mkwrite. */
static const struct vm_operations_struct cifs_file_vm_ops = {
	.fault = filemap_fault,
	.map_pages = filemap_map_pages,
	.page_mkwrite = cifs_page_mkwrite,
};
3084 
cifs_file_strict_mmap_prepare(struct vm_area_desc * desc)3085 int cifs_file_strict_mmap_prepare(struct vm_area_desc *desc)
3086 {
3087 	int xid, rc = 0;
3088 	struct inode *inode = file_inode(desc->file);
3089 
3090 	xid = get_xid();
3091 
3092 	if (!CIFS_CACHE_READ(CIFS_I(inode)))
3093 		rc = cifs_zap_mapping(inode);
3094 	if (!rc)
3095 		rc = generic_file_mmap_prepare(desc);
3096 	if (!rc)
3097 		desc->vm_ops = &cifs_file_vm_ops;
3098 
3099 	free_xid(xid);
3100 	return rc;
3101 }
3102 
cifs_file_mmap_prepare(struct vm_area_desc * desc)3103 int cifs_file_mmap_prepare(struct vm_area_desc *desc)
3104 {
3105 	int rc, xid;
3106 
3107 	xid = get_xid();
3108 
3109 	rc = cifs_revalidate_file(desc->file);
3110 	if (rc)
3111 		cifs_dbg(FYI, "Validation prior to mmap failed, error=%d\n",
3112 			 rc);
3113 	if (!rc)
3114 		rc = generic_file_mmap_prepare(desc);
3115 	if (!rc)
3116 		desc->vm_ops = &cifs_file_vm_ops;
3117 
3118 	free_xid(xid);
3119 	return rc;
3120 }
3121 
is_inode_writable(struct cifsInodeInfo * cifs_inode)3122 static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
3123 {
3124 	struct cifsFileInfo *open_file;
3125 
3126 	spin_lock(&cifs_inode->open_file_lock);
3127 	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
3128 		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
3129 			spin_unlock(&cifs_inode->open_file_lock);
3130 			return 1;
3131 		}
3132 	}
3133 	spin_unlock(&cifs_inode->open_file_lock);
3134 	return 0;
3135 }
3136 
3137 /* We do not want to update the file size from server for inodes
3138    open for write - to avoid races with writepage extending
3139    the file - in the future we could consider allowing
3140    refreshing the inode only on increases in the file size
3141    but this is tricky to do without racing with writebehind
3142    page caching in the current Linux kernel design */
is_size_safe_to_change(struct cifsInodeInfo * cifsInode,__u64 end_of_file,bool from_readdir)3143 bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file,
3144 			    bool from_readdir)
3145 {
3146 	if (!cifsInode)
3147 		return true;
3148 
3149 	if (is_inode_writable(cifsInode) ||
3150 		((cifsInode->oplock & CIFS_CACHE_RW_FLG) != 0 && from_readdir)) {
3151 		/* This inode is open for write at least once */
3152 		struct cifs_sb_info *cifs_sb = CIFS_SB(cifsInode);
3153 
3154 		if (cifs_sb_flags(cifs_sb) & CIFS_MOUNT_DIRECT_IO) {
3155 			/* since no page cache to corrupt on directio
3156 			we can change size safely */
3157 			return true;
3158 		}
3159 
3160 		if (i_size_read(&cifsInode->netfs.inode) < end_of_file)
3161 			return true;
3162 
3163 		return false;
3164 	} else
3165 		return true;
3166 }
3167 
/*
 * Work item run when the server breaks our oplock/lease.  Downgrades the
 * cached oplock state, flushes/invalidates cached data as required by the
 * new level, pushes byte-range locks to the server, and finally sends the
 * oplock break acknowledgment (unless cancelled or the file is closed).
 */
void cifs_oplock_break(struct work_struct *work)
{
	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
						  oplock_break);
	struct inode *inode = d_inode(cfile->dentry);
	struct super_block *sb = inode->i_sb;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	bool cache_read, cache_write, cache_handle;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct tcon_link *tlink;
	unsigned int oplock;
	int rc = 0;
	bool purge_cache = false, oplock_break_cancelled;
	__u64 persistent_fid, volatile_fid;
	__u16 net_fid;

	/* Let in-flight writers drain before changing the caching level. */
	wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
			TASK_UNINTERRUPTIBLE);

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink))
		goto out;
	tcon = tlink_tcon(tlink);
	server = tcon->ses->server;

	/* Downgrade and sample the resulting caching flags atomically. */
	scoped_guard(spinlock, &cinode->open_file_lock) {
		unsigned int sbflags = cifs_sb_flags(cifs_sb);

		server->ops->downgrade_oplock(server, cinode, cfile->oplock_level,
					      cfile->oplock_epoch, &purge_cache);
		oplock = READ_ONCE(cinode->oplock);
		/* Mount-time cache=ro/rw options force caching regardless. */
		cache_read = (oplock & CIFS_CACHE_READ_FLG) ||
			(sbflags & CIFS_MOUNT_RO_CACHE);
		cache_write = (oplock & CIFS_CACHE_WRITE_FLG) ||
			(sbflags & CIFS_MOUNT_RW_CACHE);
		cache_handle = oplock & CIFS_CACHE_HANDLE_FLG;
	}

	/* Mandatory brlocks are incompatible with read-only caching. */
	if (!cache_write && cache_read && cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock to None for inode=%p due to mand locks\n",
			 inode);
		cifs_reset_oplock(cinode);
		oplock = 0;
		cache_read = cache_write = cache_handle = false;
	}

	if (S_ISREG(inode->i_mode)) {
		/* Propagate the break to any local lease holders. */
		if (cache_read)
			break_lease(inode, O_RDONLY);
		else
			break_lease(inode, O_WRONLY);
		rc = filemap_fdatawrite(inode->i_mapping);
		if (!cache_read || purge_cache) {
			rc = filemap_fdatawait(inode->i_mapping);
			mapping_set_error(inode->i_mapping, rc);
			cifs_zap_mapping(inode);
		}
		cifs_dbg(FYI, "Oplock flush inode %p rc %d\n", inode, rc);
		/* Still caching writes: no need to push brlocks now. */
		if (cache_write)
			goto oplock_break_ack;
	}

	rc = cifs_push_locks(cfile);
	if (rc)
		cifs_dbg(VFS, "Push locks rc = %d\n", rc);

oplock_break_ack:
	/*
	 * When oplock break is received and there are no active
	 * file handles but cached, then schedule deferred close immediately.
	 * So, new open will not use cached handle.
	 */

	if (!cache_handle && !list_empty(&cinode->deferred_closes))
		cifs_close_deferred_file(cinode);

	/* Snapshot fids before dropping our reference on cfile. */
	persistent_fid = cfile->fid.persistent_fid;
	volatile_fid = cfile->fid.volatile_fid;
	net_fid = cfile->fid.netfid;
	oplock_break_cancelled = cfile->oplock_break_cancelled;

	_cifsFileInfo_put(cfile, false /* do not wait for ourself */, false);
	/*
	 * MS-SMB2 3.2.5.19.1 and 3.2.5.19.2 (and MS-CIFS 3.2.5.42) do not require
	 * an acknowledgment to be sent when the file has already been closed.
	 */
	spin_lock(&cinode->open_file_lock);
	/* check list empty since can race with kill_sb calling tree disconnect */
	if (!oplock_break_cancelled && !list_empty(&cinode->openFileList)) {
		spin_unlock(&cinode->open_file_lock);
		rc = server->ops->oplock_response(tcon, persistent_fid,
						  volatile_fid, net_fid,
						  cinode, oplock);
		cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
	} else
		spin_unlock(&cinode->open_file_lock);

	cifs_put_tlink(tlink);
out:
	cifs_done_oplock_break(cinode);
}
3271 
/*
 * Prepare a cifs file for use as a swapfile: reject sparse files and files
 * whose mapping cannot do swap I/O, mark the open handle as a swapfile, and
 * register a single swap extent covering the whole file.
 */
static int cifs_swap_activate(struct swap_info_struct *sis,
			      struct file *swap_file, sector_t *span)
{
	struct cifsFileInfo *cfile = swap_file->private_data;
	struct inode *inode = swap_file->f_mapping->host;
	unsigned long blocks;
	long long isize;

	cifs_dbg(FYI, "swap activate\n");

	if (!swap_file->f_mapping->a_ops->swap_rw)
		/* Cannot support swap */
		return -EINVAL;

	/* Sample size/blocks consistently under i_lock. */
	spin_lock(&inode->i_lock);
	blocks = inode->i_blocks;
	isize = inode->i_size;
	spin_unlock(&inode->i_lock);
	/*
	 * Fewer 512-byte blocks than the size implies means holes.
	 * NOTE(review): blocks*512 could wrap on 32-bit for very large
	 * files - presumably not reachable in practice; confirm.
	 */
	if (blocks*512 < isize) {
		pr_warn("swap activate: swapfile has holes\n");
		return -EINVAL;
	}
	*span = sis->pages;

	pr_warn_once("Swap support over SMB3 is experimental\n");

	/*
	 * TODO: consider adding ACL (or documenting how) to prevent other
	 * users (on this or other systems) from reading it
	 */


	/* TODO: add sk_set_memalloc(inet) or similar */

	if (cfile)
		cfile->swapfile = true;
	/*
	 * TODO: Since file already open, we can't open with DENY_ALL here
	 * but we could add call to grab a byte range lock to prevent others
	 * from reading or writing the file
	 */

	sis->flags |= SWP_FS_OPS;
	return add_swap_extent(sis, 0, sis->max, 0);
}
3317 
cifs_swap_deactivate(struct file * file)3318 static void cifs_swap_deactivate(struct file *file)
3319 {
3320 	struct cifsFileInfo *cfile = file->private_data;
3321 
3322 	cifs_dbg(FYI, "swap deactivate\n");
3323 
3324 	/* TODO: undo sk_set_memalloc(inet) will eventually be needed */
3325 
3326 	if (cfile)
3327 		cfile->swapfile = false;
3328 
3329 	/* do we need to unpin (or unlock) the file */
3330 }
3331 
3332 /**
3333  * cifs_swap_rw - SMB3 address space operation for swap I/O
3334  * @iocb: target I/O control block
3335  * @iter: I/O buffer
3336  *
3337  * Perform IO to the swap-file.  This is much like direct IO.
3338  */
cifs_swap_rw(struct kiocb * iocb,struct iov_iter * iter)3339 static int cifs_swap_rw(struct kiocb *iocb, struct iov_iter *iter)
3340 {
3341 	ssize_t ret;
3342 
3343 	if (iov_iter_rw(iter) == READ)
3344 		ret = netfs_unbuffered_read_iter_locked(iocb, iter);
3345 	else
3346 		ret = netfs_unbuffered_write_iter_locked(iocb, iter, NULL);
3347 	if (ret < 0)
3348 		return ret;
3349 	return 0;
3350 }
3351 
/*
 * Address space operations for cifs files: buffered I/O is handled by
 * netfslib; direct I/O is a no-op here because O_DIRECT is handled in the
 * read_iter/write_iter paths above.
 */
const struct address_space_operations cifs_addr_ops = {
	.read_folio	= netfs_read_folio,
	.readahead	= netfs_readahead,
	.writepages	= netfs_writepages,
	.dirty_folio	= netfs_dirty_folio,
	.release_folio	= netfs_release_folio,
	.direct_IO	= noop_direct_IO,
	.invalidate_folio = netfs_invalidate_folio,
	.migrate_folio	= filemap_migrate_folio,
	/*
	 * TODO: investigate and if useful we could add an is_dirty_writeback
	 * helper if needed
	 */
	.swap_activate	= cifs_swap_activate,
	.swap_deactivate = cifs_swap_deactivate,
	.swap_rw = cifs_swap_rw,
};
3369 
/*
 * cifs_readahead requires the server to support a buffer large enough to
 * contain the header plus one complete page of data.  Otherwise, we need
 * to leave cifs_readahead out of the address space operations.
 */
/* Small-buffer variant: no .readahead, .direct_IO or swap ops. */
const struct address_space_operations cifs_addr_ops_smallbuf = {
	.read_folio	= netfs_read_folio,
	.writepages	= netfs_writepages,
	.dirty_folio	= netfs_dirty_folio,
	.release_folio	= netfs_release_folio,
	.invalidate_folio = netfs_invalidate_folio,
	.migrate_folio	= filemap_migrate_folio,
};
3383