xref: /linux/fs/smb/client/file.c (revision 81dc1e4d32b064ac47abc60b0acbf49b66a34d52)
1 // SPDX-License-Identifier: LGPL-2.1
2 /*
3  *
4  *   vfs operations that deal with files
5  *
6  *   Copyright (C) International Business Machines  Corp., 2002,2010
7  *   Author(s): Steve French (sfrench@us.ibm.com)
8  *              Jeremy Allison (jra@samba.org)
9  *
10  */
11 #include <linux/fs.h>
12 #include <linux/fs_struct.h>
13 #include <linux/filelock.h>
14 #include <linux/backing-dev.h>
15 #include <linux/stat.h>
16 #include <linux/fcntl.h>
17 #include <linux/pagemap.h>
18 #include <linux/pagevec.h>
19 #include <linux/writeback.h>
20 #include <linux/task_io_accounting_ops.h>
21 #include <linux/delay.h>
22 #include <linux/mount.h>
23 #include <linux/slab.h>
24 #include <linux/swap.h>
25 #include <linux/mm.h>
26 #include <asm/div64.h>
27 #include "cifsfs.h"
28 #include "cifsglob.h"
29 #include "cifsproto.h"
30 #include "smb2proto.h"
31 #include "cifs_unicode.h"
32 #include "cifs_debug.h"
33 #include "cifs_fs_sb.h"
34 #include "fscache.h"
35 #include "smbdirect.h"
36 #include "fs_context.h"
37 #include "cifs_ioctl.h"
38 #include "cached_dir.h"
39 #include <trace/events/netfs.h>
40 
41 static int cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush);
42 
43 /*
44  * Prepare a subrequest to upload to the server.  We need to allocate credits
45  * so that we know the maximum amount of data that we can include in it.
46  */
cifs_prepare_write(struct netfs_io_subrequest * subreq)47 static void cifs_prepare_write(struct netfs_io_subrequest *subreq)
48 {
49 	struct cifs_io_subrequest *wdata =
50 		container_of(subreq, struct cifs_io_subrequest, subreq);
51 	struct cifs_io_request *req = wdata->req;
52 	struct netfs_io_stream *stream = &req->rreq.io_streams[subreq->stream_nr];
53 	struct TCP_Server_Info *server;
54 	struct cifsFileInfo *open_file = req->cfile;
55 	struct cifs_sb_info *cifs_sb = CIFS_SB(wdata->rreq->inode->i_sb);
56 	size_t wsize = req->rreq.wsize;
57 	int rc;
58 
59 	if (!wdata->have_xid) {
60 		wdata->xid = get_xid();
61 		wdata->have_xid = true;
62 	}
63 
64 	server = cifs_pick_channel(tlink_tcon(open_file->tlink)->ses);
65 	wdata->server = server;
66 
67 	if (cifs_sb->ctx->wsize == 0)
68 		cifs_negotiate_wsize(server, cifs_sb->ctx,
69 				     tlink_tcon(req->cfile->tlink));
70 
71 retry:
72 	if (open_file->invalidHandle) {
73 		rc = cifs_reopen_file(open_file, false);
74 		if (rc < 0) {
75 			if (rc == -EAGAIN)
76 				goto retry;
77 			subreq->error = rc;
78 			return netfs_prepare_write_failed(subreq);
79 		}
80 	}
81 
82 	rc = server->ops->wait_mtu_credits(server, wsize, &stream->sreq_max_len,
83 					   &wdata->credits);
84 	if (rc < 0) {
85 		subreq->error = rc;
86 		return netfs_prepare_write_failed(subreq);
87 	}
88 
89 	wdata->credits.rreq_debug_id = subreq->rreq->debug_id;
90 	wdata->credits.rreq_debug_index = subreq->debug_index;
91 	wdata->credits.in_flight_check = 1;
92 	trace_smb3_rw_credits(wdata->rreq->debug_id,
93 			      wdata->subreq.debug_index,
94 			      wdata->credits.value,
95 			      server->credits, server->in_flight,
96 			      wdata->credits.value,
97 			      cifs_trace_rw_credits_write_prepare);
98 
99 #ifdef CONFIG_CIFS_SMB_DIRECT
100 	if (server->smbd_conn) {
101 		const struct smbdirect_socket_parameters *sp =
102 			smbd_get_parameters(server->smbd_conn);
103 
104 		stream->sreq_max_segs = sp->max_frmr_depth;
105 	}
106 #endif
107 }
108 
109 /*
110  * Issue a subrequest to upload to the server.
111  */
cifs_issue_write(struct netfs_io_subrequest * subreq)112 static void cifs_issue_write(struct netfs_io_subrequest *subreq)
113 {
114 	struct cifs_io_subrequest *wdata =
115 		container_of(subreq, struct cifs_io_subrequest, subreq);
116 	struct cifs_sb_info *sbi = CIFS_SB(subreq->rreq->inode->i_sb);
117 	int rc;
118 
119 	if (cifs_forced_shutdown(sbi)) {
120 		rc = smb_EIO(smb_eio_trace_forced_shutdown);
121 		goto fail;
122 	}
123 
124 	rc = adjust_credits(wdata->server, wdata, cifs_trace_rw_credits_issue_write_adjust);
125 	if (rc)
126 		goto fail;
127 
128 	rc = -EAGAIN;
129 	if (wdata->req->cfile->invalidHandle)
130 		goto fail;
131 
132 	wdata->server->ops->async_writev(wdata);
133 out:
134 	return;
135 
136 fail:
137 	if (rc == -EAGAIN)
138 		trace_netfs_sreq(subreq, netfs_sreq_trace_retry);
139 	else
140 		trace_netfs_sreq(subreq, netfs_sreq_trace_fail);
141 	add_credits_and_wake_if(wdata->server, &wdata->credits, 0);
142 	cifs_write_subrequest_terminated(wdata, rc);
143 	goto out;
144 }
145 
cifs_netfs_invalidate_cache(struct netfs_io_request * wreq)146 static void cifs_netfs_invalidate_cache(struct netfs_io_request *wreq)
147 {
148 	cifs_invalidate_cache(wreq->inode, 0);
149 }
150 
151 /*
152  * Negotiate the size of a read operation on behalf of the netfs library.
153  */
cifs_prepare_read(struct netfs_io_subrequest * subreq)154 static int cifs_prepare_read(struct netfs_io_subrequest *subreq)
155 {
156 	struct netfs_io_request *rreq = subreq->rreq;
157 	struct cifs_io_subrequest *rdata = container_of(subreq, struct cifs_io_subrequest, subreq);
158 	struct cifs_io_request *req = container_of(subreq->rreq, struct cifs_io_request, rreq);
159 	struct TCP_Server_Info *server;
160 	struct cifs_sb_info *cifs_sb = CIFS_SB(rreq->inode->i_sb);
161 	size_t size;
162 	int rc = 0;
163 
164 	if (!rdata->have_xid) {
165 		rdata->xid = get_xid();
166 		rdata->have_xid = true;
167 	}
168 
169 	server = cifs_pick_channel(tlink_tcon(req->cfile->tlink)->ses);
170 	rdata->server = server;
171 
172 	if (cifs_sb->ctx->rsize == 0)
173 		cifs_negotiate_rsize(server, cifs_sb->ctx,
174 				     tlink_tcon(req->cfile->tlink));
175 
176 	rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->rsize,
177 					   &size, &rdata->credits);
178 	if (rc)
179 		return rc;
180 
181 	rreq->io_streams[0].sreq_max_len = size;
182 
183 	rdata->credits.in_flight_check = 1;
184 	rdata->credits.rreq_debug_id = rreq->debug_id;
185 	rdata->credits.rreq_debug_index = subreq->debug_index;
186 
187 	trace_smb3_rw_credits(rdata->rreq->debug_id,
188 			      rdata->subreq.debug_index,
189 			      rdata->credits.value,
190 			      server->credits, server->in_flight, 0,
191 			      cifs_trace_rw_credits_read_submit);
192 
193 #ifdef CONFIG_CIFS_SMB_DIRECT
194 	if (server->smbd_conn) {
195 		const struct smbdirect_socket_parameters *sp =
196 			smbd_get_parameters(server->smbd_conn);
197 
198 		rreq->io_streams[0].sreq_max_segs = sp->max_frmr_depth;
199 	}
200 #endif
201 	return 0;
202 }
203 
204 /*
205  * Issue a read operation on behalf of the netfs helper functions.  We're asked
206  * to make a read of a certain size at a point in the file.  We are permitted
207  * to only read a portion of that, but as long as we read something, the netfs
208  * helper will call us again so that we can issue another read.
209  */
cifs_issue_read(struct netfs_io_subrequest * subreq)210 static void cifs_issue_read(struct netfs_io_subrequest *subreq)
211 {
212 	struct netfs_io_request *rreq = subreq->rreq;
213 	struct cifs_io_subrequest *rdata = container_of(subreq, struct cifs_io_subrequest, subreq);
214 	struct cifs_io_request *req = container_of(subreq->rreq, struct cifs_io_request, rreq);
215 	struct TCP_Server_Info *server = rdata->server;
216 	int rc = 0;
217 
218 	cifs_dbg(FYI, "%s: op=%08x[%x] mapping=%p len=%zu/%zu\n",
219 		 __func__, rreq->debug_id, subreq->debug_index, rreq->mapping,
220 		 subreq->transferred, subreq->len);
221 
222 	rc = adjust_credits(server, rdata, cifs_trace_rw_credits_issue_read_adjust);
223 	if (rc)
224 		goto failed;
225 
226 	if (req->cfile->invalidHandle) {
227 		do {
228 			rc = cifs_reopen_file(req->cfile, true);
229 		} while (rc == -EAGAIN);
230 		if (rc)
231 			goto failed;
232 	}
233 
234 	if (subreq->rreq->origin != NETFS_UNBUFFERED_READ &&
235 	    subreq->rreq->origin != NETFS_DIO_READ)
236 		__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
237 
238 	trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
239 	rc = rdata->server->ops->async_readv(rdata);
240 	if (rc)
241 		goto failed;
242 	return;
243 
244 failed:
245 	subreq->error = rc;
246 	netfs_read_subreq_terminated(subreq);
247 }
248 
249 /*
250  * Writeback calls this when it finds a folio that needs uploading.  This isn't
251  * called if writeback only has copy-to-cache to deal with.
252  */
cifs_begin_writeback(struct netfs_io_request * wreq)253 static void cifs_begin_writeback(struct netfs_io_request *wreq)
254 {
255 	struct cifs_io_request *req = container_of(wreq, struct cifs_io_request, rreq);
256 	int ret;
257 
258 	ret = cifs_get_writable_file(CIFS_I(wreq->inode), FIND_ANY, &req->cfile);
259 	if (ret) {
260 		cifs_dbg(VFS, "No writable handle in writepages ret=%d\n", ret);
261 		return;
262 	}
263 
264 	wreq->io_streams[0].avail = true;
265 }
266 
267 /*
268  * Initialise a request.
269  */
cifs_init_request(struct netfs_io_request * rreq,struct file * file)270 static int cifs_init_request(struct netfs_io_request *rreq, struct file *file)
271 {
272 	struct cifs_io_request *req = container_of(rreq, struct cifs_io_request, rreq);
273 	struct cifs_sb_info *cifs_sb = CIFS_SB(rreq->inode);
274 	struct cifsFileInfo *open_file = NULL;
275 
276 	rreq->rsize = cifs_sb->ctx->rsize;
277 	rreq->wsize = cifs_sb->ctx->wsize;
278 	req->pid = current->tgid; // Ummm...  This may be a workqueue
279 
280 	if (file) {
281 		open_file = file->private_data;
282 		rreq->netfs_priv = file->private_data;
283 		req->cfile = cifsFileInfo_get(open_file);
284 		if (cifs_sb_flags(cifs_sb) & CIFS_MOUNT_RWPIDFORWARD)
285 			req->pid = req->cfile->pid;
286 	} else if (rreq->origin != NETFS_WRITEBACK) {
287 		WARN_ON_ONCE(1);
288 		return smb_EIO1(smb_eio_trace_not_netfs_writeback, rreq->origin);
289 	}
290 
291 	return 0;
292 }
293 
294 /*
295  * Completion of a request operation.
296  */
cifs_rreq_done(struct netfs_io_request * rreq)297 static void cifs_rreq_done(struct netfs_io_request *rreq)
298 {
299 	struct timespec64 atime, mtime;
300 	struct inode *inode = rreq->inode;
301 
302 	/* we do not want atime to be less than mtime, it broke some apps */
303 	atime = inode_set_atime_to_ts(inode, current_time(inode));
304 	mtime = inode_get_mtime(inode);
305 	if (timespec64_compare(&atime, &mtime))
306 		inode_set_atime_to_ts(inode, inode_get_mtime(inode));
307 }
308 
cifs_free_request(struct netfs_io_request * rreq)309 static void cifs_free_request(struct netfs_io_request *rreq)
310 {
311 	struct cifs_io_request *req = container_of(rreq, struct cifs_io_request, rreq);
312 
313 	if (req->cfile)
314 		cifsFileInfo_put(req->cfile);
315 }
316 
cifs_free_subrequest(struct netfs_io_subrequest * subreq)317 static void cifs_free_subrequest(struct netfs_io_subrequest *subreq)
318 {
319 	struct cifs_io_subrequest *rdata =
320 		container_of(subreq, struct cifs_io_subrequest, subreq);
321 	int rc = subreq->error;
322 
323 	if (rdata->subreq.source == NETFS_DOWNLOAD_FROM_SERVER) {
324 #ifdef CONFIG_CIFS_SMB_DIRECT
325 		if (rdata->mr) {
326 			smbd_deregister_mr(rdata->mr);
327 			rdata->mr = NULL;
328 		}
329 #endif
330 	}
331 
332 	if (rdata->credits.value != 0) {
333 		trace_smb3_rw_credits(rdata->rreq->debug_id,
334 				      rdata->subreq.debug_index,
335 				      rdata->credits.value,
336 				      rdata->server ? rdata->server->credits : 0,
337 				      rdata->server ? rdata->server->in_flight : 0,
338 				      -rdata->credits.value,
339 				      cifs_trace_rw_credits_free_subreq);
340 		if (rdata->server)
341 			add_credits_and_wake_if(rdata->server, &rdata->credits, 0);
342 		else
343 			rdata->credits.value = 0;
344 	}
345 
346 	if (rdata->have_xid)
347 		free_xid(rdata->xid);
348 }
349 
350 const struct netfs_request_ops cifs_req_ops = {
351 	.request_pool		= &cifs_io_request_pool,
352 	.subrequest_pool	= &cifs_io_subrequest_pool,
353 	.init_request		= cifs_init_request,
354 	.free_request		= cifs_free_request,
355 	.free_subrequest	= cifs_free_subrequest,
356 	.prepare_read		= cifs_prepare_read,
357 	.issue_read		= cifs_issue_read,
358 	.done			= cifs_rreq_done,
359 	.begin_writeback	= cifs_begin_writeback,
360 	.prepare_write		= cifs_prepare_write,
361 	.issue_write		= cifs_issue_write,
362 	.invalidate_cache	= cifs_netfs_invalidate_cache,
363 };
364 
365 /*
366  * Mark as invalid, all open files on tree connections since they
367  * were closed when session to server was lost.
368  */
369 void
cifs_mark_open_files_invalid(struct cifs_tcon * tcon)370 cifs_mark_open_files_invalid(struct cifs_tcon *tcon)
371 {
372 	struct cifsFileInfo *open_file = NULL;
373 	struct list_head *tmp;
374 	struct list_head *tmp1;
375 
376 	/* only send once per connect */
377 	spin_lock(&tcon->tc_lock);
378 	if (tcon->need_reconnect)
379 		tcon->status = TID_NEED_RECON;
380 
381 	if (tcon->status != TID_NEED_RECON) {
382 		spin_unlock(&tcon->tc_lock);
383 		return;
384 	}
385 	tcon->status = TID_IN_FILES_INVALIDATE;
386 	spin_unlock(&tcon->tc_lock);
387 
388 	/* list all files open on tree connection and mark them invalid */
389 	spin_lock(&tcon->open_file_lock);
390 	list_for_each_safe(tmp, tmp1, &tcon->openFileList) {
391 		open_file = list_entry(tmp, struct cifsFileInfo, tlist);
392 		open_file->invalidHandle = true;
393 		open_file->oplock_break_cancelled = true;
394 	}
395 	spin_unlock(&tcon->open_file_lock);
396 
397 	invalidate_all_cached_dirs(tcon);
398 	spin_lock(&tcon->tc_lock);
399 	if (tcon->status == TID_IN_FILES_INVALIDATE)
400 		tcon->status = TID_NEED_TCON;
401 	spin_unlock(&tcon->tc_lock);
402 
403 	/*
404 	 * BB Add call to evict_inodes(sb) for all superblocks mounted
405 	 * to this tcon.
406 	 */
407 }
408 
cifs_convert_flags(unsigned int oflags,int rdwr_for_fscache)409 static inline int cifs_convert_flags(unsigned int oflags, int rdwr_for_fscache)
410 {
411 	int flags = 0;
412 
413 	if (oflags & O_TMPFILE)
414 		flags |= DELETE;
415 
416 	if ((oflags & O_ACCMODE) == O_RDONLY)
417 		return flags | GENERIC_READ;
418 	if ((oflags & O_ACCMODE) == O_WRONLY) {
419 		return flags | (rdwr_for_fscache == 1 ?
420 				(GENERIC_READ | GENERIC_WRITE) : GENERIC_WRITE);
421 	}
422 	if ((oflags & O_ACCMODE) == O_RDWR) {
423 		/* GENERIC_ALL is too much permission to request
424 		   can cause unnecessary access denied on create */
425 		/* return GENERIC_ALL; */
426 		return flags | GENERIC_READ | GENERIC_WRITE;
427 	}
428 
429 	return flags | READ_CONTROL | FILE_WRITE_ATTRIBUTES |
430 		FILE_READ_ATTRIBUTES | FILE_WRITE_EA | FILE_APPEND_DATA |
431 		FILE_WRITE_DATA | FILE_READ_DATA;
432 }
433 
434 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
cifs_posix_convert_flags(unsigned int flags)435 static u32 cifs_posix_convert_flags(unsigned int flags)
436 {
437 	u32 posix_flags = 0;
438 
439 	if ((flags & O_ACCMODE) == O_RDONLY)
440 		posix_flags = SMB_O_RDONLY;
441 	else if ((flags & O_ACCMODE) == O_WRONLY)
442 		posix_flags = SMB_O_WRONLY;
443 	else if ((flags & O_ACCMODE) == O_RDWR)
444 		posix_flags = SMB_O_RDWR;
445 
446 	if (flags & O_CREAT) {
447 		posix_flags |= SMB_O_CREAT;
448 		if (flags & O_EXCL)
449 			posix_flags |= SMB_O_EXCL;
450 	} else if (flags & O_EXCL)
451 		cifs_dbg(FYI, "Application %s pid %d has incorrectly set O_EXCL flag but not O_CREAT on file open. Ignoring O_EXCL\n",
452 			 current->comm, current->tgid);
453 
454 	if (flags & O_TRUNC)
455 		posix_flags |= SMB_O_TRUNC;
456 	/* be safe and imply O_SYNC for O_DSYNC */
457 	if (flags & O_DSYNC)
458 		posix_flags |= SMB_O_SYNC;
459 	if (flags & O_DIRECTORY)
460 		posix_flags |= SMB_O_DIRECTORY;
461 	if (flags & O_NOFOLLOW)
462 		posix_flags |= SMB_O_NOFOLLOW;
463 	if (flags & O_DIRECT)
464 		posix_flags |= SMB_O_DIRECT;
465 
466 	return posix_flags;
467 }
468 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
469 
cifs_get_disposition(unsigned int flags)470 static inline int cifs_get_disposition(unsigned int flags)
471 {
472 	if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
473 		return FILE_CREATE;
474 	else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
475 		return FILE_OVERWRITE_IF;
476 	else if ((flags & O_CREAT) == O_CREAT)
477 		return FILE_OPEN_IF;
478 	else if ((flags & O_TRUNC) == O_TRUNC)
479 		return FILE_OVERWRITE;
480 	else
481 		return FILE_OPEN;
482 }
483 
484 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
cifs_posix_open(const char * full_path,struct inode ** pinode,struct super_block * sb,int mode,unsigned int f_flags,__u32 * poplock,__u16 * pnetfid,unsigned int xid)485 int cifs_posix_open(const char *full_path, struct inode **pinode,
486 			struct super_block *sb, int mode, unsigned int f_flags,
487 			__u32 *poplock, __u16 *pnetfid, unsigned int xid)
488 {
489 	int rc;
490 	FILE_UNIX_BASIC_INFO *presp_data;
491 	__u32 posix_flags = 0;
492 	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
493 	struct cifs_fattr fattr;
494 	struct tcon_link *tlink;
495 	struct cifs_tcon *tcon;
496 
497 	cifs_dbg(FYI, "posix open %s\n", full_path);
498 
499 	presp_data = kzalloc_obj(FILE_UNIX_BASIC_INFO);
500 	if (presp_data == NULL)
501 		return -ENOMEM;
502 
503 	tlink = cifs_sb_tlink(cifs_sb);
504 	if (IS_ERR(tlink)) {
505 		rc = PTR_ERR(tlink);
506 		goto posix_open_ret;
507 	}
508 
509 	tcon = tlink_tcon(tlink);
510 	mode &= ~current_umask();
511 
512 	posix_flags = cifs_posix_convert_flags(f_flags);
513 	rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
514 			     poplock, full_path, cifs_sb->local_nls,
515 			     cifs_remap(cifs_sb));
516 	cifs_put_tlink(tlink);
517 
518 	if (rc)
519 		goto posix_open_ret;
520 
521 	if (presp_data->Type == cpu_to_le32(-1))
522 		goto posix_open_ret; /* open ok, caller does qpathinfo */
523 
524 	if (!pinode)
525 		goto posix_open_ret; /* caller does not need info */
526 
527 	cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);
528 
529 	/* get new inode and set it up */
530 	if (*pinode == NULL) {
531 		cifs_fill_uniqueid(sb, &fattr);
532 		*pinode = cifs_iget(sb, &fattr);
533 		if (!*pinode) {
534 			rc = -ENOMEM;
535 			goto posix_open_ret;
536 		}
537 	} else {
538 		cifs_revalidate_mapping(*pinode);
539 		rc = cifs_fattr_to_inode(*pinode, &fattr, false);
540 	}
541 
542 posix_open_ret:
543 	kfree(presp_data);
544 	return rc;
545 }
546 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
547 
cifs_nt_open(const char * full_path,struct inode * inode,struct cifs_sb_info * cifs_sb,struct cifs_tcon * tcon,unsigned int f_flags,__u32 * oplock,struct cifs_fid * fid,unsigned int xid,struct cifs_open_info_data * buf)548 static int cifs_nt_open(const char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
549 			struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
550 			struct cifs_fid *fid, unsigned int xid, struct cifs_open_info_data *buf)
551 {
552 	int rc;
553 	int desired_access;
554 	int disposition;
555 	int create_options = CREATE_NOT_DIR;
556 	struct TCP_Server_Info *server = tcon->ses->server;
557 	struct cifs_open_parms oparms;
558 	int rdwr_for_fscache = 0;
559 
560 	if (!server->ops->open)
561 		return -ENOSYS;
562 
563 	/* If we're caching, we need to be able to fill in around partial writes. */
564 	if (cifs_fscache_enabled(inode) && (f_flags & O_ACCMODE) == O_WRONLY)
565 		rdwr_for_fscache = 1;
566 
567 	desired_access = cifs_convert_flags(f_flags, rdwr_for_fscache);
568 
569 /*********************************************************************
570  *  open flag mapping table:
571  *
572  *	POSIX Flag            CIFS Disposition
573  *	----------            ----------------
574  *	O_CREAT               FILE_OPEN_IF
575  *	O_CREAT | O_EXCL      FILE_CREATE
576  *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
577  *	O_TRUNC               FILE_OVERWRITE
578  *	none of the above     FILE_OPEN
579  *
580  *	Note that there is not a direct match between disposition
581  *	FILE_SUPERSEDE (ie create whether or not file exists although
582  *	O_CREAT | O_TRUNC is similar but truncates the existing
583  *	file rather than creating a new file as FILE_SUPERSEDE does
584  *	(which uses the attributes / metadata passed in on open call)
585  *?
586  *?  O_SYNC is a reasonable match to CIFS writethrough flag
587  *?  and the read write flags match reasonably.  O_LARGEFILE
588  *?  is irrelevant because largefile support is always used
589  *?  by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
590  *	 O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
591  *********************************************************************/
592 
593 	disposition = cifs_get_disposition(f_flags);
594 	/* BB pass O_SYNC flag through on file attributes .. BB */
595 	create_options |= cifs_open_create_options(f_flags, create_options);
596 
597 retry_open:
598 	oparms = (struct cifs_open_parms) {
599 		.tcon = tcon,
600 		.cifs_sb = cifs_sb,
601 		.desired_access = desired_access,
602 		.create_options = cifs_create_options(cifs_sb, create_options),
603 		.disposition = disposition,
604 		.path = full_path,
605 		.fid = fid,
606 	};
607 
608 	rc = server->ops->open(xid, &oparms, oplock, buf);
609 	if (rc) {
610 		if (rc == -EACCES && rdwr_for_fscache == 1) {
611 			desired_access = cifs_convert_flags(f_flags, 0);
612 			rdwr_for_fscache = 2;
613 			goto retry_open;
614 		}
615 		return rc;
616 	}
617 	if (rdwr_for_fscache == 2)
618 		cifs_invalidate_cache(inode, FSCACHE_INVAL_DIO_WRITE);
619 
620 	/* TODO: Add support for calling posix query info but with passing in fid */
621 	if (tcon->unix_ext)
622 		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
623 					      xid);
624 	else
625 		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
626 					 xid, fid);
627 
628 	if (rc) {
629 		server->ops->close(xid, tcon, fid);
630 		if (rc == -ESTALE)
631 			rc = -EOPENSTALE;
632 	}
633 
634 	return rc;
635 }
636 
637 static bool
cifs_has_mand_locks(struct cifsInodeInfo * cinode)638 cifs_has_mand_locks(struct cifsInodeInfo *cinode)
639 {
640 	struct cifs_fid_locks *cur;
641 	bool has_locks = false;
642 
643 	down_read(&cinode->lock_sem);
644 	list_for_each_entry(cur, &cinode->llist, llist) {
645 		if (!list_empty(&cur->locks)) {
646 			has_locks = true;
647 			break;
648 		}
649 	}
650 	up_read(&cinode->lock_sem);
651 	return has_locks;
652 }
653 
654 void
cifs_down_write(struct rw_semaphore * sem)655 cifs_down_write(struct rw_semaphore *sem)
656 {
657 	while (!down_write_trylock(sem))
658 		msleep(10);
659 }
660 
661 static void cifsFileInfo_put_work(struct work_struct *work);
662 void serverclose_work(struct work_struct *work);
663 
cifs_new_fileinfo(struct cifs_fid * fid,struct file * file,struct tcon_link * tlink,__u32 oplock,const char * symlink_target)664 struct cifsFileInfo *cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
665 				       struct tcon_link *tlink, __u32 oplock,
666 				       const char *symlink_target)
667 {
668 	struct dentry *dentry = file_dentry(file);
669 	struct inode *inode = d_inode(dentry);
670 	struct cifsInodeInfo *cinode = CIFS_I(inode);
671 	struct cifsFileInfo *cfile;
672 	struct cifs_fid_locks *fdlocks;
673 	struct cifs_tcon *tcon = tlink_tcon(tlink);
674 	struct TCP_Server_Info *server = tcon->ses->server;
675 
676 	cfile = kzalloc_obj(struct cifsFileInfo);
677 	if (cfile == NULL)
678 		return cfile;
679 
680 	fdlocks = kzalloc_obj(struct cifs_fid_locks);
681 	if (!fdlocks) {
682 		kfree(cfile);
683 		return NULL;
684 	}
685 
686 	if (symlink_target) {
687 		cfile->symlink_target = kstrdup(symlink_target, GFP_KERNEL);
688 		if (!cfile->symlink_target) {
689 			kfree(fdlocks);
690 			kfree(cfile);
691 			return NULL;
692 		}
693 	}
694 
695 	INIT_LIST_HEAD(&fdlocks->locks);
696 	fdlocks->cfile = cfile;
697 	cfile->llist = fdlocks;
698 
699 	cfile->count = 1;
700 	cfile->pid = current->tgid;
701 	cfile->uid = current_fsuid();
702 	cfile->dentry = dget(dentry);
703 	cfile->f_flags = file->f_flags;
704 	cfile->invalidHandle = false;
705 	cfile->deferred_close_scheduled = false;
706 	cfile->status_file_deleted = file->f_flags & O_TMPFILE;
707 	cfile->tlink = cifs_get_tlink(tlink);
708 	INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
709 	INIT_WORK(&cfile->put, cifsFileInfo_put_work);
710 	INIT_WORK(&cfile->serverclose, serverclose_work);
711 	INIT_DELAYED_WORK(&cfile->deferred, smb2_deferred_work_close);
712 	mutex_init(&cfile->fh_mutex);
713 	spin_lock_init(&cfile->file_info_lock);
714 
715 	/*
716 	 * If the server returned a read oplock and we have mandatory brlocks,
717 	 * set oplock level to None.
718 	 */
719 	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
720 		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
721 		oplock = 0;
722 	}
723 
724 	cifs_down_write(&cinode->lock_sem);
725 	list_add(&fdlocks->llist, &cinode->llist);
726 	up_write(&cinode->lock_sem);
727 
728 	spin_lock(&tcon->open_file_lock);
729 	if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE && oplock)
730 		oplock = fid->pending_open->oplock;
731 	list_del(&fid->pending_open->olist);
732 
733 	list_add(&cfile->tlist, &tcon->openFileList);
734 	atomic_inc(&tcon->num_local_opens);
735 
736 	/* if readable file instance put first in list*/
737 	spin_lock(&cinode->open_file_lock);
738 	if (file->f_flags & O_TMPFILE)
739 		set_bit(CIFS_INO_TMPFILE, &cinode->flags);
740 	fid->purge_cache = false;
741 	server->ops->set_fid(cfile, fid, oplock);
742 
743 	if (file->f_mode & FMODE_READ)
744 		list_add(&cfile->flist, &cinode->openFileList);
745 	else
746 		list_add_tail(&cfile->flist, &cinode->openFileList);
747 	spin_unlock(&cinode->open_file_lock);
748 	spin_unlock(&tcon->open_file_lock);
749 
750 	if (fid->purge_cache)
751 		cifs_zap_mapping(inode);
752 
753 	file->private_data = cfile;
754 	return cfile;
755 }
756 
757 struct cifsFileInfo *
cifsFileInfo_get(struct cifsFileInfo * cifs_file)758 cifsFileInfo_get(struct cifsFileInfo *cifs_file)
759 {
760 	spin_lock(&cifs_file->file_info_lock);
761 	cifsFileInfo_get_locked(cifs_file);
762 	spin_unlock(&cifs_file->file_info_lock);
763 	return cifs_file;
764 }
765 
cifsFileInfo_put_final(struct cifsFileInfo * cifs_file)766 static void cifsFileInfo_put_final(struct cifsFileInfo *cifs_file)
767 {
768 	struct inode *inode = d_inode(cifs_file->dentry);
769 	struct cifsInodeInfo *cifsi = CIFS_I(inode);
770 	struct cifsLockInfo *li, *tmp;
771 
772 	/*
773 	 * Delete any outstanding lock records. We'll lose them when the file
774 	 * is closed anyway.
775 	 */
776 	cifs_down_write(&cifsi->lock_sem);
777 	list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
778 		list_del(&li->llist);
779 		cifs_del_lock_waiters(li);
780 		kfree(li);
781 	}
782 	list_del(&cifs_file->llist->llist);
783 	kfree(cifs_file->llist);
784 	up_write(&cifsi->lock_sem);
785 
786 	cifs_put_tlink(cifs_file->tlink);
787 	dput(cifs_file->dentry);
788 	kfree(cifs_file->symlink_target);
789 	kfree(cifs_file);
790 }
791 
cifsFileInfo_put_work(struct work_struct * work)792 static void cifsFileInfo_put_work(struct work_struct *work)
793 {
794 	struct cifsFileInfo *cifs_file = container_of(work,
795 			struct cifsFileInfo, put);
796 
797 	cifsFileInfo_put_final(cifs_file);
798 }
799 
serverclose_work(struct work_struct * work)800 void serverclose_work(struct work_struct *work)
801 {
802 	struct cifsFileInfo *cifs_file = container_of(work,
803 			struct cifsFileInfo, serverclose);
804 
805 	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
806 
807 	struct TCP_Server_Info *server = tcon->ses->server;
808 	int rc = 0;
809 	int retries = 0;
810 	int MAX_RETRIES = 4;
811 
812 	do {
813 		if (server->ops->close_getattr)
814 			rc = server->ops->close_getattr(0, tcon, cifs_file);
815 		else if (server->ops->close)
816 			rc = server->ops->close(0, tcon, &cifs_file->fid);
817 
818 		if (rc == -EBUSY || rc == -EAGAIN) {
819 			retries++;
820 			msleep(250);
821 		}
822 	} while ((rc == -EBUSY || rc == -EAGAIN) && (retries < MAX_RETRIES)
823 	);
824 
825 	if (retries == MAX_RETRIES)
826 		pr_warn("Serverclose failed %d times, giving up\n", MAX_RETRIES);
827 
828 	if (cifs_file->offload)
829 		queue_work(fileinfo_put_wq, &cifs_file->put);
830 	else
831 		cifsFileInfo_put_final(cifs_file);
832 }
833 
834 /**
835  * cifsFileInfo_put - release a reference of file priv data
836  *
837  * Always potentially wait for oplock handler. See _cifsFileInfo_put().
838  *
839  * @cifs_file:	cifs/smb3 specific info (eg refcounts) for an open file
840  */
cifsFileInfo_put(struct cifsFileInfo * cifs_file)841 void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
842 {
843 	_cifsFileInfo_put(cifs_file, true, true);
844 }
845 
846 /**
847  * _cifsFileInfo_put - release a reference of file priv data
848  *
849  * This may involve closing the filehandle @cifs_file out on the
850  * server. Must be called without holding tcon->open_file_lock,
851  * cinode->open_file_lock and cifs_file->file_info_lock.
852  *
853  * If @wait_for_oplock_handler is true and we are releasing the last
854  * reference, wait for any running oplock break handler of the file
855  * and cancel any pending one.
856  *
857  * @cifs_file:	cifs/smb3 specific info (eg refcounts) for an open file
858  * @wait_oplock_handler: must be false if called from oplock_break_handler
859  * @offload:	not offloaded on close and oplock breaks
860  *
861  */
_cifsFileInfo_put(struct cifsFileInfo * cifs_file,bool wait_oplock_handler,bool offload)862 void _cifsFileInfo_put(struct cifsFileInfo *cifs_file,
863 		       bool wait_oplock_handler, bool offload)
864 {
865 	struct inode *inode = d_inode(cifs_file->dentry);
866 	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
867 	struct TCP_Server_Info *server = tcon->ses->server;
868 	struct cifsInodeInfo *cifsi = CIFS_I(inode);
869 	struct super_block *sb = inode->i_sb;
870 	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
871 	struct cifs_fid fid = {};
872 	struct cifs_pending_open open;
873 	bool oplock_break_cancelled;
874 	bool serverclose_offloaded = false;
875 
876 	spin_lock(&tcon->open_file_lock);
877 	spin_lock(&cifsi->open_file_lock);
878 	spin_lock(&cifs_file->file_info_lock);
879 
880 	cifs_file->offload = offload;
881 	if (--cifs_file->count > 0) {
882 		spin_unlock(&cifs_file->file_info_lock);
883 		spin_unlock(&cifsi->open_file_lock);
884 		spin_unlock(&tcon->open_file_lock);
885 		return;
886 	}
887 	spin_unlock(&cifs_file->file_info_lock);
888 
889 	if (server->ops->get_lease_key)
890 		server->ops->get_lease_key(inode, &fid);
891 
892 	/* store open in pending opens to make sure we don't miss lease break */
893 	cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);
894 
895 	/* remove it from the lists */
896 	list_del(&cifs_file->flist);
897 	list_del(&cifs_file->tlist);
898 	atomic_dec(&tcon->num_local_opens);
899 
900 	if (list_empty(&cifsi->openFileList)) {
901 		cifs_dbg(FYI, "closing last open instance for inode %p\n",
902 			 d_inode(cifs_file->dentry));
903 		/*
904 		 * In strict cache mode we need invalidate mapping on the last
905 		 * close  because it may cause a error when we open this file
906 		 * again and get at least level II oplock.
907 		 */
908 		if (cifs_sb_flags(cifs_sb) & CIFS_MOUNT_STRICT_IO)
909 			set_bit(CIFS_INO_INVALID_MAPPING, &cifsi->flags);
910 		cifs_set_oplock_level(cifsi, 0);
911 	}
912 
913 	spin_unlock(&cifsi->open_file_lock);
914 	spin_unlock(&tcon->open_file_lock);
915 
916 	oplock_break_cancelled = wait_oplock_handler ?
917 		cancel_work_sync(&cifs_file->oplock_break) : false;
918 
919 	if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
920 		struct TCP_Server_Info *server = tcon->ses->server;
921 		unsigned int xid;
922 		int rc = 0;
923 
924 		xid = get_xid();
925 		if (server->ops->close_getattr)
926 			rc = server->ops->close_getattr(xid, tcon, cifs_file);
927 		else if (server->ops->close)
928 			rc = server->ops->close(xid, tcon, &cifs_file->fid);
929 		_free_xid(xid);
930 
931 		if (rc == -EBUSY || rc == -EAGAIN) {
932 			// Server close failed, hence offloading it as an async op
933 			queue_work(serverclose_wq, &cifs_file->serverclose);
934 			serverclose_offloaded = true;
935 		}
936 	}
937 
938 	if (oplock_break_cancelled)
939 		cifs_done_oplock_break(cifsi);
940 
941 	cifs_del_pending_open(&open);
942 
943 	// if serverclose has been offloaded to wq (on failure), it will
944 	// handle offloading put as well. If serverclose not offloaded,
945 	// we need to handle offloading put here.
946 	if (!serverclose_offloaded) {
947 		if (offload)
948 			queue_work(fileinfo_put_wq, &cifs_file->put);
949 		else
950 			cifsFileInfo_put_final(cifs_file);
951 	}
952 }
953 
cifs_file_flush(const unsigned int xid,struct inode * inode,struct cifsFileInfo * cfile)954 int cifs_file_flush(const unsigned int xid, struct inode *inode,
955 		    struct cifsFileInfo *cfile)
956 {
957 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode);
958 	struct cifs_tcon *tcon;
959 	int rc;
960 
961 	if (cifs_sb_flags(cifs_sb) & CIFS_MOUNT_NOSSYNC)
962 		return 0;
963 
964 	if (cfile && (OPEN_FMODE(cfile->f_flags) & FMODE_WRITE)) {
965 		tcon = tlink_tcon(cfile->tlink);
966 		return tcon->ses->server->ops->flush(xid, tcon,
967 						     &cfile->fid);
968 	}
969 	rc = cifs_get_writable_file(CIFS_I(inode), FIND_ANY, &cfile);
970 	if (!rc) {
971 		tcon = tlink_tcon(cfile->tlink);
972 		rc = tcon->ses->server->ops->flush(xid, tcon, &cfile->fid);
973 		cifsFileInfo_put(cfile);
974 	} else if (rc == -EBADF) {
975 		rc = 0;
976 	}
977 	return rc;
978 }
979 
cifs_do_truncate(const unsigned int xid,struct dentry * dentry)980 static int cifs_do_truncate(const unsigned int xid, struct dentry *dentry)
981 {
982 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(dentry));
983 	struct inode *inode = d_inode(dentry);
984 	struct cifsFileInfo *cfile = NULL;
985 	struct TCP_Server_Info *server;
986 	struct cifs_tcon *tcon;
987 	int rc;
988 
989 	rc = filemap_write_and_wait(inode->i_mapping);
990 	if (is_interrupt_error(rc))
991 		return -ERESTARTSYS;
992 	mapping_set_error(inode->i_mapping, rc);
993 
994 	cfile = find_writable_file(cinode, FIND_FSUID_ONLY);
995 	rc = cifs_file_flush(xid, inode, cfile);
996 	if (!rc) {
997 		if (cfile) {
998 			tcon = tlink_tcon(cfile->tlink);
999 			server = tcon->ses->server;
1000 			rc = server->ops->set_file_size(xid, tcon,
1001 							cfile, 0, false);
1002 		}
1003 		if (!rc) {
1004 			netfs_resize_file(&cinode->netfs, 0, true);
1005 			cifs_setsize(inode, 0);
1006 		}
1007 	}
1008 	if (cfile)
1009 		cifsFileInfo_put(cfile);
1010 	return rc;
1011 }
1012 
cifs_open(struct inode * inode,struct file * file)1013 int cifs_open(struct inode *inode, struct file *file)
1014 
1015 {
1016 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode);
1017 	struct cifs_open_info_data data = {};
1018 	struct cifsFileInfo *cfile = NULL;
1019 	struct TCP_Server_Info *server;
1020 	struct cifs_pending_open open;
1021 	bool posix_open_ok = false;
1022 	struct cifs_fid fid = {};
1023 	struct tcon_link *tlink;
1024 	struct cifs_tcon *tcon;
1025 	const char *full_path;
1026 	unsigned int sbflags;
1027 	int rc = -EACCES;
1028 	unsigned int xid;
1029 	__u32 oplock;
1030 	void *page;
1031 
1032 	xid = get_xid();
1033 
1034 	if (unlikely(cifs_forced_shutdown(cifs_sb))) {
1035 		free_xid(xid);
1036 		return smb_EIO(smb_eio_trace_forced_shutdown);
1037 	}
1038 
1039 	tlink = cifs_sb_tlink(cifs_sb);
1040 	if (IS_ERR(tlink)) {
1041 		free_xid(xid);
1042 		return PTR_ERR(tlink);
1043 	}
1044 	tcon = tlink_tcon(tlink);
1045 	server = tcon->ses->server;
1046 
1047 	page = alloc_dentry_path();
1048 	full_path = build_path_from_dentry(file_dentry(file), page);
1049 	if (IS_ERR(full_path)) {
1050 		rc = PTR_ERR(full_path);
1051 		goto out;
1052 	}
1053 
1054 	cifs_dbg(FYI, "inode = 0x%p file flags are 0x%x for %s\n",
1055 		 inode, file->f_flags, full_path);
1056 
1057 	sbflags = cifs_sb_flags(cifs_sb);
1058 	if ((file->f_flags & O_DIRECT) && (sbflags & CIFS_MOUNT_STRICT_IO)) {
1059 		if (sbflags & CIFS_MOUNT_NO_BRL)
1060 			file->f_op = &cifs_file_direct_nobrl_ops;
1061 		else
1062 			file->f_op = &cifs_file_direct_ops;
1063 	}
1064 
1065 	if (file->f_flags & O_TRUNC) {
1066 		rc = cifs_do_truncate(xid, file_dentry(file));
1067 		if (rc)
1068 			goto out;
1069 	}
1070 
1071 	/* Get the cached handle as SMB2 close is deferred */
1072 	if (OPEN_FMODE(file->f_flags) & FMODE_WRITE) {
1073 		rc = __cifs_get_writable_file(CIFS_I(inode),
1074 					      FIND_FSUID_ONLY |
1075 					      FIND_NO_PENDING_DELETE |
1076 					      FIND_OPEN_FLAGS,
1077 					      file->f_flags, &cfile);
1078 	} else {
1079 		cfile = __find_readable_file(CIFS_I(inode),
1080 					     FIND_NO_PENDING_DELETE |
1081 					     FIND_OPEN_FLAGS,
1082 					     file->f_flags);
1083 		rc = cfile ? 0 : -ENOENT;
1084 	}
1085 	if (rc == 0) {
1086 		file->private_data = cfile;
1087 		spin_lock(&CIFS_I(inode)->deferred_lock);
1088 		cifs_del_deferred_close(cfile);
1089 		spin_unlock(&CIFS_I(inode)->deferred_lock);
1090 		goto use_cache;
1091 	}
1092 	/* hard link on the deferred close file */
1093 	rc = cifs_get_hardlink_path(tcon, inode, file);
1094 	if (rc)
1095 		cifs_close_deferred_file(CIFS_I(inode));
1096 
1097 	if (server->oplocks)
1098 		oplock = REQ_OPLOCK;
1099 	else
1100 		oplock = 0;
1101 
1102 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1103 	if (!tcon->broken_posix_open && tcon->unix_ext &&
1104 	    cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
1105 				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
1106 		/* can not refresh inode info since size could be stale */
1107 		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
1108 				cifs_sb->ctx->file_mode /* ignored */,
1109 				file->f_flags, &oplock, &fid.netfid, xid);
1110 		if (rc == 0) {
1111 			cifs_dbg(FYI, "posix open succeeded\n");
1112 			posix_open_ok = true;
1113 		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
1114 			if (tcon->ses->serverNOS)
1115 				cifs_dbg(VFS, "server %s of type %s returned unexpected error on SMB posix open, disabling posix open support. Check if server update available.\n",
1116 					 tcon->ses->ip_addr,
1117 					 tcon->ses->serverNOS);
1118 			tcon->broken_posix_open = true;
1119 		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
1120 			 (rc != -EOPNOTSUPP)) /* path not found or net err */
1121 			goto out;
1122 		/*
1123 		 * Else fallthrough to retry open the old way on network i/o
1124 		 * or DFS errors.
1125 		 */
1126 	}
1127 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1128 
1129 	if (server->ops->get_lease_key)
1130 		server->ops->get_lease_key(inode, &fid);
1131 
1132 	cifs_add_pending_open(&fid, tlink, &open);
1133 
1134 	if (!posix_open_ok) {
1135 		if (server->ops->get_lease_key)
1136 			server->ops->get_lease_key(inode, &fid);
1137 
1138 		rc = cifs_nt_open(full_path, inode, cifs_sb, tcon, file->f_flags, &oplock, &fid,
1139 				  xid, &data);
1140 		if (rc) {
1141 			cifs_del_pending_open(&open);
1142 			goto out;
1143 		}
1144 	}
1145 
1146 	cfile = cifs_new_fileinfo(&fid, file, tlink, oplock, data.symlink_target);
1147 	if (cfile == NULL) {
1148 		if (server->ops->close)
1149 			server->ops->close(xid, tcon, &fid);
1150 		cifs_del_pending_open(&open);
1151 		rc = -ENOMEM;
1152 		goto out;
1153 	}
1154 
1155 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1156 	if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
1157 		/*
1158 		 * Time to set mode which we can not set earlier due to
1159 		 * problems creating new read-only files.
1160 		 */
1161 		struct cifs_unix_set_info_args args = {
1162 			.mode	= inode->i_mode,
1163 			.uid	= INVALID_UID, /* no change */
1164 			.gid	= INVALID_GID, /* no change */
1165 			.ctime	= NO_CHANGE_64,
1166 			.atime	= NO_CHANGE_64,
1167 			.mtime	= NO_CHANGE_64,
1168 			.device	= 0,
1169 		};
1170 		CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
1171 				       cfile->pid);
1172 	}
1173 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1174 
1175 use_cache:
1176 	fscache_use_cookie(cifs_inode_cookie(file_inode(file)),
1177 			   file->f_mode & FMODE_WRITE);
1178 	if (!(file->f_flags & O_DIRECT))
1179 		goto out;
1180 	if ((file->f_flags & (O_ACCMODE | O_APPEND)) == O_RDONLY)
1181 		goto out;
1182 	cifs_invalidate_cache(file_inode(file), FSCACHE_INVAL_DIO_WRITE);
1183 
1184 out:
1185 	free_dentry_path(page);
1186 	free_xid(xid);
1187 	cifs_put_tlink(tlink);
1188 	cifs_free_open_info(&data);
1189 	return rc;
1190 }
1191 
1192 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1193 static int cifs_push_posix_locks(struct cifsFileInfo *cfile);
1194 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1195 
1196 /*
1197  * Try to reacquire byte range locks that were released when session
1198  * to server was lost.
1199  */
1200 static int
cifs_relock_file(struct cifsFileInfo * cfile)1201 cifs_relock_file(struct cifsFileInfo *cfile)
1202 {
1203 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1204 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1205 	int rc = 0;
1206 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1207 	struct cifs_sb_info *cifs_sb = CIFS_SB(cinode);
1208 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1209 
1210 	down_read_nested(&cinode->lock_sem, SINGLE_DEPTH_NESTING);
1211 	if (cinode->can_cache_brlcks) {
1212 		/* can cache locks - no need to relock */
1213 		up_read(&cinode->lock_sem);
1214 		return rc;
1215 	}
1216 
1217 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1218 	if (cap_unix(tcon->ses) &&
1219 	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1220 	    ((cifs_sb_flags(cifs_sb) & CIFS_MOUNT_NOPOSIXBRL) == 0))
1221 		rc = cifs_push_posix_locks(cfile);
1222 	else
1223 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1224 		rc = tcon->ses->server->ops->push_mand_locks(cfile);
1225 
1226 	up_read(&cinode->lock_sem);
1227 	return rc;
1228 }
1229 
1230 static int
cifs_reopen_file(struct cifsFileInfo * cfile,bool can_flush)1231 cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
1232 {
1233 	int rc = -EACCES;
1234 	unsigned int xid;
1235 	__u32 oplock;
1236 	struct cifs_sb_info *cifs_sb;
1237 	struct cifs_tcon *tcon;
1238 	struct TCP_Server_Info *server;
1239 	struct cifsInodeInfo *cinode;
1240 	struct inode *inode;
1241 	void *page;
1242 	const char *full_path;
1243 	int desired_access;
1244 	int disposition = FILE_OPEN;
1245 	int create_options = CREATE_NOT_DIR;
1246 	struct cifs_open_parms oparms;
1247 	int rdwr_for_fscache = 0;
1248 
1249 	xid = get_xid();
1250 	mutex_lock(&cfile->fh_mutex);
1251 	if (!cfile->invalidHandle) {
1252 		mutex_unlock(&cfile->fh_mutex);
1253 		free_xid(xid);
1254 		return 0;
1255 	}
1256 
1257 	inode = d_inode(cfile->dentry);
1258 	cifs_sb = CIFS_SB(inode->i_sb);
1259 	tcon = tlink_tcon(cfile->tlink);
1260 	server = tcon->ses->server;
1261 
1262 	/*
1263 	 * Can not grab rename sem here because various ops, including those
1264 	 * that already have the rename sem can end up causing writepage to get
1265 	 * called and if the server was down that means we end up here, and we
1266 	 * can never tell if the caller already has the rename_sem.
1267 	 */
1268 	page = alloc_dentry_path();
1269 	full_path = build_path_from_dentry(cfile->dentry, page);
1270 	if (IS_ERR(full_path)) {
1271 		mutex_unlock(&cfile->fh_mutex);
1272 		free_dentry_path(page);
1273 		free_xid(xid);
1274 		return PTR_ERR(full_path);
1275 	}
1276 
1277 	cifs_dbg(FYI, "inode = 0x%p file flags 0x%x for %s\n",
1278 		 inode, cfile->f_flags, full_path);
1279 
1280 	if (tcon->ses->server->oplocks)
1281 		oplock = REQ_OPLOCK;
1282 	else
1283 		oplock = 0;
1284 
1285 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1286 	if (tcon->unix_ext && cap_unix(tcon->ses) &&
1287 	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
1288 				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
1289 		/*
1290 		 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
1291 		 * original open. Must mask them off for a reopen.
1292 		 */
1293 		unsigned int oflags = cfile->f_flags &
1294 						~(O_CREAT | O_EXCL | O_TRUNC);
1295 
1296 		rc = cifs_posix_open(full_path, NULL, inode->i_sb,
1297 				     cifs_sb->ctx->file_mode /* ignored */,
1298 				     oflags, &oplock, &cfile->fid.netfid, xid);
1299 		if (rc == 0) {
1300 			cifs_dbg(FYI, "posix reopen succeeded\n");
1301 			oparms.reconnect = true;
1302 			goto reopen_success;
1303 		}
1304 		/*
1305 		 * fallthrough to retry open the old way on errors, especially
1306 		 * in the reconnect path it is important to retry hard
1307 		 */
1308 	}
1309 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1310 
1311 	/* If we're caching, we need to be able to fill in around partial writes. */
1312 	if (cifs_fscache_enabled(inode) && (cfile->f_flags & O_ACCMODE) == O_WRONLY)
1313 		rdwr_for_fscache = 1;
1314 
1315 	desired_access = cifs_convert_flags(cfile->f_flags, rdwr_for_fscache);
1316 	create_options |= cifs_open_create_options(cfile->f_flags,
1317 						   create_options);
1318 
1319 	if (server->ops->get_lease_key)
1320 		server->ops->get_lease_key(inode, &cfile->fid);
1321 
1322 retry_open:
1323 	oparms = (struct cifs_open_parms) {
1324 		.tcon = tcon,
1325 		.cifs_sb = cifs_sb,
1326 		.desired_access = desired_access,
1327 		.create_options = cifs_create_options(cifs_sb, create_options),
1328 		.disposition = disposition,
1329 		.path = full_path,
1330 		.fid = &cfile->fid,
1331 		.reconnect = true,
1332 	};
1333 
1334 	/*
1335 	 * Can not refresh inode by passing in file_info buf to be returned by
1336 	 * ops->open and then calling get_inode_info with returned buf since
1337 	 * file might have write behind data that needs to be flushed and server
1338 	 * version of file size can be stale. If we knew for sure that inode was
1339 	 * not dirty locally we could do this.
1340 	 */
1341 	rc = server->ops->open(xid, &oparms, &oplock, NULL);
1342 	if (rc == -ENOENT && oparms.reconnect == false) {
1343 		/* durable handle timeout is expired - open the file again */
1344 		rc = server->ops->open(xid, &oparms, &oplock, NULL);
1345 		/* indicate that we need to relock the file */
1346 		oparms.reconnect = true;
1347 	}
1348 	if (rc == -EACCES && rdwr_for_fscache == 1) {
1349 		desired_access = cifs_convert_flags(cfile->f_flags, 0);
1350 		rdwr_for_fscache = 2;
1351 		goto retry_open;
1352 	}
1353 
1354 	if (rc) {
1355 		mutex_unlock(&cfile->fh_mutex);
1356 		cifs_dbg(FYI, "cifs_reopen returned 0x%x\n", rc);
1357 		cifs_dbg(FYI, "oplock: %d\n", oplock);
1358 		goto reopen_error_exit;
1359 	}
1360 
1361 	if (rdwr_for_fscache == 2)
1362 		cifs_invalidate_cache(inode, FSCACHE_INVAL_DIO_WRITE);
1363 
1364 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1365 reopen_success:
1366 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1367 	cfile->invalidHandle = false;
1368 	mutex_unlock(&cfile->fh_mutex);
1369 	cinode = CIFS_I(inode);
1370 
1371 	if (can_flush) {
1372 		rc = filemap_write_and_wait(inode->i_mapping);
1373 		if (!is_interrupt_error(rc))
1374 			mapping_set_error(inode->i_mapping, rc);
1375 
1376 		if (tcon->posix_extensions) {
1377 			rc = smb311_posix_get_inode_info(&inode, full_path,
1378 							 NULL, inode->i_sb, xid);
1379 		} else if (tcon->unix_ext) {
1380 			rc = cifs_get_inode_info_unix(&inode, full_path,
1381 						      inode->i_sb, xid);
1382 		} else {
1383 			rc = cifs_get_inode_info(&inode, full_path, NULL,
1384 						 inode->i_sb, xid, NULL);
1385 		}
1386 	}
1387 	/*
1388 	 * Else we are writing out data to server already and could deadlock if
1389 	 * we tried to flush data, and since we do not know if we have data that
1390 	 * would invalidate the current end of file on the server we can not go
1391 	 * to the server to get the new inode info.
1392 	 */
1393 
1394 	/*
1395 	 * If the server returned a read oplock and we have mandatory brlocks,
1396 	 * set oplock level to None.
1397 	 */
1398 	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
1399 		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
1400 		oplock = 0;
1401 	}
1402 
1403 	scoped_guard(spinlock, &cinode->open_file_lock)
1404 		server->ops->set_fid(cfile, &cfile->fid, oplock);
1405 	if (oparms.reconnect)
1406 		cifs_relock_file(cfile);
1407 
1408 reopen_error_exit:
1409 	free_dentry_path(page);
1410 	free_xid(xid);
1411 	return rc;
1412 }
1413 
smb2_deferred_work_close(struct work_struct * work)1414 void smb2_deferred_work_close(struct work_struct *work)
1415 {
1416 	struct cifsFileInfo *cfile = container_of(work,
1417 			struct cifsFileInfo, deferred.work);
1418 
1419 	spin_lock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
1420 	cifs_del_deferred_close(cfile);
1421 	cfile->deferred_close_scheduled = false;
1422 	spin_unlock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
1423 	_cifsFileInfo_put(cfile, true, false);
1424 }
1425 
1426 static bool
smb2_can_defer_close(struct inode * inode,struct cifs_deferred_close * dclose)1427 smb2_can_defer_close(struct inode *inode, struct cifs_deferred_close *dclose)
1428 {
1429 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
1430 	struct cifsInodeInfo *cinode = CIFS_I(inode);
1431 	unsigned int oplock = READ_ONCE(cinode->oplock);
1432 
1433 	return cifs_sb->ctx->closetimeo && cinode->lease_granted && dclose &&
1434 		(oplock == CIFS_CACHE_RHW_FLG || oplock == CIFS_CACHE_RH_FLG) &&
1435 		!test_bit(CIFS_INO_CLOSE_ON_LOCK, &cinode->flags);
1436 
1437 }
1438 
cifs_close(struct inode * inode,struct file * file)1439 int cifs_close(struct inode *inode, struct file *file)
1440 {
1441 	struct cifsFileInfo *cfile;
1442 	struct cifsInodeInfo *cinode = CIFS_I(inode);
1443 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
1444 	struct cifs_deferred_close *dclose;
1445 
1446 	cifs_fscache_unuse_inode_cookie(inode, file->f_mode & FMODE_WRITE);
1447 
1448 	if (file->private_data != NULL) {
1449 		cfile = file->private_data;
1450 		file->private_data = NULL;
1451 		dclose = kmalloc_obj(struct cifs_deferred_close);
1452 		if ((cfile->status_file_deleted == false) &&
1453 		    (smb2_can_defer_close(inode, dclose))) {
1454 			if (test_and_clear_bit(NETFS_ICTX_MODIFIED_ATTR, &cinode->netfs.flags)) {
1455 				inode_set_mtime_to_ts(inode,
1456 						      inode_set_ctime_current(inode));
1457 			}
1458 			spin_lock(&cinode->deferred_lock);
1459 			cifs_add_deferred_close(cfile, dclose);
1460 			if (cfile->deferred_close_scheduled &&
1461 			    delayed_work_pending(&cfile->deferred)) {
1462 				/*
1463 				 * If there is no pending work, mod_delayed_work queues new work.
1464 				 * So, Increase the ref count to avoid use-after-free.
1465 				 */
1466 				if (!mod_delayed_work(deferredclose_wq,
1467 						&cfile->deferred, cifs_sb->ctx->closetimeo))
1468 					cifsFileInfo_get(cfile);
1469 			} else {
1470 				/* Deferred close for files */
1471 				queue_delayed_work(deferredclose_wq,
1472 						&cfile->deferred, cifs_sb->ctx->closetimeo);
1473 				cfile->deferred_close_scheduled = true;
1474 				spin_unlock(&cinode->deferred_lock);
1475 				return 0;
1476 			}
1477 			spin_unlock(&cinode->deferred_lock);
1478 			_cifsFileInfo_put(cfile, true, false);
1479 		} else {
1480 			_cifsFileInfo_put(cfile, true, false);
1481 			kfree(dclose);
1482 		}
1483 	}
1484 
1485 	/* return code from the ->release op is always ignored */
1486 	return 0;
1487 }
1488 
1489 void
cifs_reopen_persistent_handles(struct cifs_tcon * tcon)1490 cifs_reopen_persistent_handles(struct cifs_tcon *tcon)
1491 {
1492 	struct cifsFileInfo *open_file, *tmp;
1493 	LIST_HEAD(tmp_list);
1494 
1495 	if (!tcon->use_persistent || !tcon->need_reopen_files)
1496 		return;
1497 
1498 	tcon->need_reopen_files = false;
1499 
1500 	cifs_dbg(FYI, "Reopen persistent handles\n");
1501 
1502 	/* list all files open on tree connection, reopen resilient handles  */
1503 	spin_lock(&tcon->open_file_lock);
1504 	list_for_each_entry(open_file, &tcon->openFileList, tlist) {
1505 		if (!open_file->invalidHandle)
1506 			continue;
1507 		cifsFileInfo_get(open_file);
1508 		list_add_tail(&open_file->rlist, &tmp_list);
1509 	}
1510 	spin_unlock(&tcon->open_file_lock);
1511 
1512 	list_for_each_entry_safe(open_file, tmp, &tmp_list, rlist) {
1513 		if (cifs_reopen_file(open_file, false /* do not flush */))
1514 			tcon->need_reopen_files = true;
1515 		list_del_init(&open_file->rlist);
1516 		cifsFileInfo_put(open_file);
1517 	}
1518 }
1519 
cifs_closedir(struct inode * inode,struct file * file)1520 int cifs_closedir(struct inode *inode, struct file *file)
1521 {
1522 	int rc = 0;
1523 	unsigned int xid;
1524 	struct cifsFileInfo *cfile = file->private_data;
1525 	struct cifs_tcon *tcon;
1526 	struct TCP_Server_Info *server;
1527 	char *buf;
1528 
1529 	cifs_dbg(FYI, "Closedir inode = 0x%p\n", inode);
1530 
1531 	if (cfile == NULL)
1532 		return rc;
1533 
1534 	xid = get_xid();
1535 	tcon = tlink_tcon(cfile->tlink);
1536 	server = tcon->ses->server;
1537 
1538 	cifs_dbg(FYI, "Freeing private data in close dir\n");
1539 	spin_lock(&cfile->file_info_lock);
1540 	if (server->ops->dir_needs_close(cfile)) {
1541 		cfile->invalidHandle = true;
1542 		spin_unlock(&cfile->file_info_lock);
1543 		if (server->ops->close_dir)
1544 			rc = server->ops->close_dir(xid, tcon, &cfile->fid);
1545 		else
1546 			rc = -ENOSYS;
1547 		cifs_dbg(FYI, "Closing uncompleted readdir with rc %d\n", rc);
1548 		/* not much we can do if it fails anyway, ignore rc */
1549 		rc = 0;
1550 	} else
1551 		spin_unlock(&cfile->file_info_lock);
1552 
1553 	buf = cfile->srch_inf.ntwrk_buf_start;
1554 	if (buf) {
1555 		cifs_dbg(FYI, "closedir free smb buf in srch struct\n");
1556 		cfile->srch_inf.ntwrk_buf_start = NULL;
1557 		if (cfile->srch_inf.smallBuf)
1558 			cifs_small_buf_release(buf);
1559 		else
1560 			cifs_buf_release(buf);
1561 	}
1562 
1563 	cifs_put_tlink(cfile->tlink);
1564 	kfree(file->private_data);
1565 	file->private_data = NULL;
1566 	/* BB can we lock the filestruct while this is going on? */
1567 	free_xid(xid);
1568 	return rc;
1569 }
1570 
1571 static struct cifsLockInfo *
cifs_lock_init(__u64 offset,__u64 length,__u8 type,__u16 flags)1572 cifs_lock_init(__u64 offset, __u64 length, __u8 type, __u16 flags)
1573 {
1574 	struct cifsLockInfo *lock =
1575 		kmalloc_obj(struct cifsLockInfo);
1576 	if (!lock)
1577 		return lock;
1578 	lock->offset = offset;
1579 	lock->length = length;
1580 	lock->type = type;
1581 	lock->pid = current->tgid;
1582 	lock->flags = flags;
1583 	INIT_LIST_HEAD(&lock->blist);
1584 	init_waitqueue_head(&lock->block_q);
1585 	return lock;
1586 }
1587 
1588 void
cifs_del_lock_waiters(struct cifsLockInfo * lock)1589 cifs_del_lock_waiters(struct cifsLockInfo *lock)
1590 {
1591 	struct cifsLockInfo *li, *tmp;
1592 	list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
1593 		list_del_init(&li->blist);
1594 		wake_up(&li->block_q);
1595 	}
1596 }
1597 
1598 #define CIFS_LOCK_OP	0
1599 #define CIFS_READ_OP	1
1600 #define CIFS_WRITE_OP	2
1601 
1602 /* @rw_check : 0 - no op, 1 - read, 2 - write */
1603 static bool
cifs_find_fid_lock_conflict(struct cifs_fid_locks * fdlocks,__u64 offset,__u64 length,__u8 type,__u16 flags,struct cifsFileInfo * cfile,struct cifsLockInfo ** conf_lock,int rw_check)1604 cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
1605 			    __u64 length, __u8 type, __u16 flags,
1606 			    struct cifsFileInfo *cfile,
1607 			    struct cifsLockInfo **conf_lock, int rw_check)
1608 {
1609 	struct cifsLockInfo *li;
1610 	struct cifsFileInfo *cur_cfile = fdlocks->cfile;
1611 	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
1612 
1613 	list_for_each_entry(li, &fdlocks->locks, llist) {
1614 		if (offset + length <= li->offset ||
1615 		    offset >= li->offset + li->length)
1616 			continue;
1617 		if (rw_check != CIFS_LOCK_OP && current->tgid == li->pid &&
1618 		    server->ops->compare_fids(cfile, cur_cfile)) {
1619 			/* shared lock prevents write op through the same fid */
1620 			if (!(li->type & server->vals->shared_lock_type) ||
1621 			    rw_check != CIFS_WRITE_OP)
1622 				continue;
1623 		}
1624 		if ((type & server->vals->shared_lock_type) &&
1625 		    ((server->ops->compare_fids(cfile, cur_cfile) &&
1626 		     current->tgid == li->pid) || type == li->type))
1627 			continue;
1628 		if (rw_check == CIFS_LOCK_OP &&
1629 		    (flags & FL_OFDLCK) && (li->flags & FL_OFDLCK) &&
1630 		    server->ops->compare_fids(cfile, cur_cfile))
1631 			continue;
1632 		if (conf_lock)
1633 			*conf_lock = li;
1634 		return true;
1635 	}
1636 	return false;
1637 }
1638 
1639 bool
cifs_find_lock_conflict(struct cifsFileInfo * cfile,__u64 offset,__u64 length,__u8 type,__u16 flags,struct cifsLockInfo ** conf_lock,int rw_check)1640 cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
1641 			__u8 type, __u16 flags,
1642 			struct cifsLockInfo **conf_lock, int rw_check)
1643 {
1644 	bool rc = false;
1645 	struct cifs_fid_locks *cur;
1646 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1647 
1648 	list_for_each_entry(cur, &cinode->llist, llist) {
1649 		rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
1650 						 flags, cfile, conf_lock,
1651 						 rw_check);
1652 		if (rc)
1653 			break;
1654 	}
1655 
1656 	return rc;
1657 }
1658 
1659 /*
1660  * Check if there is another lock that prevents us to set the lock (mandatory
1661  * style). If such a lock exists, update the flock structure with its
1662  * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks
1663  * or leave it the same if we can't. Returns 0 if we don't need to request to
1664  * the server or 1 otherwise.
1665  */
1666 static int
cifs_lock_test(struct cifsFileInfo * cfile,__u64 offset,__u64 length,__u8 type,struct file_lock * flock)1667 cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
1668 	       __u8 type, struct file_lock *flock)
1669 {
1670 	int rc = 0;
1671 	struct cifsLockInfo *conf_lock;
1672 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1673 	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
1674 	bool exist;
1675 
1676 	down_read(&cinode->lock_sem);
1677 
1678 	exist = cifs_find_lock_conflict(cfile, offset, length, type,
1679 					flock->c.flc_flags, &conf_lock,
1680 					CIFS_LOCK_OP);
1681 	if (exist) {
1682 		flock->fl_start = conf_lock->offset;
1683 		flock->fl_end = conf_lock->offset + conf_lock->length - 1;
1684 		flock->c.flc_pid = conf_lock->pid;
1685 		if (conf_lock->type & server->vals->shared_lock_type)
1686 			flock->c.flc_type = F_RDLCK;
1687 		else
1688 			flock->c.flc_type = F_WRLCK;
1689 	} else if (!cinode->can_cache_brlcks)
1690 		rc = 1;
1691 	else
1692 		flock->c.flc_type = F_UNLCK;
1693 
1694 	up_read(&cinode->lock_sem);
1695 	return rc;
1696 }
1697 
1698 static void
cifs_lock_add(struct cifsFileInfo * cfile,struct cifsLockInfo * lock)1699 cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
1700 {
1701 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1702 	cifs_down_write(&cinode->lock_sem);
1703 	list_add_tail(&lock->llist, &cfile->llist->locks);
1704 	up_write(&cinode->lock_sem);
1705 }
1706 
1707 /*
1708  * Set the byte-range lock (mandatory style). Returns:
1709  * 1) 0, if we set the lock and don't need to request to the server;
1710  * 2) 1, if no locks prevent us but we need to request to the server;
1711  * 3) -EACCES, if there is a lock that prevents us and wait is false.
1712  */
1713 static int
cifs_lock_add_if(struct cifsFileInfo * cfile,struct cifsLockInfo * lock,bool wait)1714 cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
1715 		 bool wait)
1716 {
1717 	struct cifsLockInfo *conf_lock;
1718 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1719 	bool exist;
1720 	int rc = 0;
1721 
1722 try_again:
1723 	exist = false;
1724 	cifs_down_write(&cinode->lock_sem);
1725 
1726 	exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
1727 					lock->type, lock->flags, &conf_lock,
1728 					CIFS_LOCK_OP);
1729 	if (!exist && cinode->can_cache_brlcks) {
1730 		list_add_tail(&lock->llist, &cfile->llist->locks);
1731 		up_write(&cinode->lock_sem);
1732 		return rc;
1733 	}
1734 
1735 	if (!exist)
1736 		rc = 1;
1737 	else if (!wait)
1738 		rc = -EACCES;
1739 	else {
1740 		list_add_tail(&lock->blist, &conf_lock->blist);
1741 		up_write(&cinode->lock_sem);
1742 		rc = wait_event_interruptible(lock->block_q,
1743 					(lock->blist.prev == &lock->blist) &&
1744 					(lock->blist.next == &lock->blist));
1745 		if (!rc)
1746 			goto try_again;
1747 		cifs_down_write(&cinode->lock_sem);
1748 		list_del_init(&lock->blist);
1749 	}
1750 
1751 	up_write(&cinode->lock_sem);
1752 	return rc;
1753 }
1754 
1755 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1756 /*
1757  * Check if there is another lock that prevents us to set the lock (posix
1758  * style). If such a lock exists, update the flock structure with its
1759  * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks
1760  * or leave it the same if we can't. Returns 0 if we don't need to request to
1761  * the server or 1 otherwise.
1762  */
1763 static int
cifs_posix_lock_test(struct file * file,struct file_lock * flock)1764 cifs_posix_lock_test(struct file *file, struct file_lock *flock)
1765 {
1766 	int rc = 0;
1767 	struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
1768 	unsigned char saved_type = flock->c.flc_type;
1769 
1770 	if ((flock->c.flc_flags & FL_POSIX) == 0)
1771 		return 1;
1772 
1773 	down_read(&cinode->lock_sem);
1774 	posix_test_lock(file, flock);
1775 
1776 	if (lock_is_unlock(flock) && !cinode->can_cache_brlcks) {
1777 		flock->c.flc_type = saved_type;
1778 		rc = 1;
1779 	}
1780 
1781 	up_read(&cinode->lock_sem);
1782 	return rc;
1783 }
1784 
1785 /*
1786  * Set the byte-range lock (posix style). Returns:
1787  * 1) <0, if the error occurs while setting the lock;
1788  * 2) 0, if we set the lock and don't need to request to the server;
1789  * 3) FILE_LOCK_DEFERRED, if we will wait for some other file_lock;
1790  * 4) FILE_LOCK_DEFERRED + 1, if we need to request to the server.
1791  */
1792 static int
cifs_posix_lock_set(struct file * file,struct file_lock * flock)1793 cifs_posix_lock_set(struct file *file, struct file_lock *flock)
1794 {
1795 	struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
1796 	int rc = FILE_LOCK_DEFERRED + 1;
1797 
1798 	if ((flock->c.flc_flags & FL_POSIX) == 0)
1799 		return rc;
1800 
1801 	cifs_down_write(&cinode->lock_sem);
1802 	if (!cinode->can_cache_brlcks) {
1803 		up_write(&cinode->lock_sem);
1804 		return rc;
1805 	}
1806 
1807 	rc = posix_lock_file(file, flock, NULL);
1808 	up_write(&cinode->lock_sem);
1809 	return rc;
1810 }
1811 
1812 int
cifs_push_mandatory_locks(struct cifsFileInfo * cfile)1813 cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
1814 {
1815 	unsigned int xid;
1816 	int rc = 0, stored_rc;
1817 	struct cifsLockInfo *li, *tmp;
1818 	struct cifs_tcon *tcon;
1819 	unsigned int num, max_num, max_buf;
1820 	LOCKING_ANDX_RANGE *buf, *cur;
1821 	static const int types[] = {
1822 		LOCKING_ANDX_LARGE_FILES,
1823 		LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
1824 	};
1825 	int i;
1826 
1827 	xid = get_xid();
1828 	tcon = tlink_tcon(cfile->tlink);
1829 
1830 	/*
1831 	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
1832 	 * and check it before using.
1833 	 */
1834 	max_buf = tcon->ses->server->maxBuf;
1835 	if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE))) {
1836 		free_xid(xid);
1837 		return -EINVAL;
1838 	}
1839 
1840 	BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
1841 		     PAGE_SIZE);
1842 	max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
1843 			PAGE_SIZE);
1844 	max_num = (max_buf - sizeof(struct smb_hdr)) /
1845 						sizeof(LOCKING_ANDX_RANGE);
1846 	buf = kzalloc_objs(LOCKING_ANDX_RANGE, max_num);
1847 	if (!buf) {
1848 		free_xid(xid);
1849 		return -ENOMEM;
1850 	}
1851 
1852 	for (i = 0; i < 2; i++) {
1853 		cur = buf;
1854 		num = 0;
1855 		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
1856 			if (li->type != types[i])
1857 				continue;
1858 			cur->Pid = cpu_to_le16(li->pid);
1859 			cur->LengthLow = cpu_to_le32((u32)li->length);
1860 			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
1861 			cur->OffsetLow = cpu_to_le32((u32)li->offset);
1862 			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
1863 			if (++num == max_num) {
1864 				stored_rc = cifs_lockv(xid, tcon,
1865 						       cfile->fid.netfid,
1866 						       (__u8)li->type, 0, num,
1867 						       buf);
1868 				if (stored_rc)
1869 					rc = stored_rc;
1870 				cur = buf;
1871 				num = 0;
1872 			} else
1873 				cur++;
1874 		}
1875 
1876 		if (num) {
1877 			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
1878 					       (__u8)types[i], 0, num, buf);
1879 			if (stored_rc)
1880 				rc = stored_rc;
1881 		}
1882 	}
1883 
1884 	kfree(buf);
1885 	free_xid(xid);
1886 	return rc;
1887 }
1888 
1889 static __u32
hash_lockowner(fl_owner_t owner)1890 hash_lockowner(fl_owner_t owner)
1891 {
1892 	return cifs_lock_secret ^ hash32_ptr((const void *)owner);
1893 }
1894 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1895 
1896 struct lock_to_push {
1897 	struct list_head llist;
1898 	__u64 offset;
1899 	__u64 length;
1900 	__u32 pid;
1901 	__u16 netfid;
1902 	__u8 type;
1903 };
1904 
1905 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1906 static int
cifs_push_posix_locks(struct cifsFileInfo * cfile)1907 cifs_push_posix_locks(struct cifsFileInfo *cfile)
1908 {
1909 	struct inode *inode = d_inode(cfile->dentry);
1910 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1911 	struct file_lock *flock;
1912 	struct file_lock_context *flctx = locks_inode_context(inode);
1913 	unsigned int count = 0, i;
1914 	int rc = 0, xid, type;
1915 	struct list_head locks_to_send, *el;
1916 	struct lock_to_push *lck, *tmp;
1917 	__u64 length;
1918 
1919 	xid = get_xid();
1920 
1921 	if (!flctx)
1922 		goto out;
1923 
1924 	spin_lock(&flctx->flc_lock);
1925 	list_for_each(el, &flctx->flc_posix) {
1926 		count++;
1927 	}
1928 	spin_unlock(&flctx->flc_lock);
1929 
1930 	INIT_LIST_HEAD(&locks_to_send);
1931 
1932 	/*
1933 	 * Allocating count locks is enough because no FL_POSIX locks can be
1934 	 * added to the list while we are holding cinode->lock_sem that
1935 	 * protects locking operations of this inode.
1936 	 */
1937 	for (i = 0; i < count; i++) {
1938 		lck = kmalloc_obj(struct lock_to_push);
1939 		if (!lck) {
1940 			rc = -ENOMEM;
1941 			goto err_out;
1942 		}
1943 		list_add_tail(&lck->llist, &locks_to_send);
1944 	}
1945 
1946 	el = locks_to_send.next;
1947 	spin_lock(&flctx->flc_lock);
1948 	for_each_file_lock(flock, &flctx->flc_posix) {
1949 		unsigned char ftype = flock->c.flc_type;
1950 
1951 		if (el == &locks_to_send) {
1952 			/*
1953 			 * The list ended. We don't have enough allocated
1954 			 * structures - something is really wrong.
1955 			 */
1956 			cifs_dbg(VFS, "Can't push all brlocks!\n");
1957 			break;
1958 		}
1959 		length = cifs_flock_len(flock);
1960 		if (ftype == F_RDLCK || ftype == F_SHLCK)
1961 			type = CIFS_RDLCK;
1962 		else
1963 			type = CIFS_WRLCK;
1964 		lck = list_entry(el, struct lock_to_push, llist);
1965 		lck->pid = hash_lockowner(flock->c.flc_owner);
1966 		lck->netfid = cfile->fid.netfid;
1967 		lck->length = length;
1968 		lck->type = type;
1969 		lck->offset = flock->fl_start;
1970 	}
1971 	spin_unlock(&flctx->flc_lock);
1972 
1973 	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
1974 		int stored_rc;
1975 
1976 		stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
1977 					     lck->offset, lck->length, NULL,
1978 					     lck->type, 0);
1979 		if (stored_rc)
1980 			rc = stored_rc;
1981 		list_del(&lck->llist);
1982 		kfree(lck);
1983 	}
1984 
1985 out:
1986 	free_xid(xid);
1987 	return rc;
1988 err_out:
1989 	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
1990 		list_del(&lck->llist);
1991 		kfree(lck);
1992 	}
1993 	goto out;
1994 }
1995 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1996 
1997 static int
cifs_push_locks(struct cifsFileInfo * cfile)1998 cifs_push_locks(struct cifsFileInfo *cfile)
1999 {
2000 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
2001 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2002 	int rc = 0;
2003 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
2004 	struct cifs_sb_info *cifs_sb = CIFS_SB(cinode);
2005 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
2006 
2007 	/* we are going to update can_cache_brlcks here - need a write access */
2008 	cifs_down_write(&cinode->lock_sem);
2009 	if (!cinode->can_cache_brlcks) {
2010 		up_write(&cinode->lock_sem);
2011 		return rc;
2012 	}
2013 
2014 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
2015 	if (cap_unix(tcon->ses) &&
2016 	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
2017 	    ((cifs_sb_flags(cifs_sb) & CIFS_MOUNT_NOPOSIXBRL) == 0))
2018 		rc = cifs_push_posix_locks(cfile);
2019 	else
2020 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
2021 		rc = tcon->ses->server->ops->push_mand_locks(cfile);
2022 
2023 	cinode->can_cache_brlcks = false;
2024 	up_write(&cinode->lock_sem);
2025 	return rc;
2026 }
2027 
2028 static void
cifs_read_flock(struct file_lock * flock,__u32 * type,int * lock,int * unlock,bool * wait_flag,struct TCP_Server_Info * server)2029 cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
2030 		bool *wait_flag, struct TCP_Server_Info *server)
2031 {
2032 	if (flock->c.flc_flags & FL_POSIX)
2033 		cifs_dbg(FYI, "Posix\n");
2034 	if (flock->c.flc_flags & FL_FLOCK)
2035 		cifs_dbg(FYI, "Flock\n");
2036 	if (flock->c.flc_flags & FL_SLEEP) {
2037 		cifs_dbg(FYI, "Blocking lock\n");
2038 		*wait_flag = true;
2039 	}
2040 	if (flock->c.flc_flags & FL_ACCESS)
2041 		cifs_dbg(FYI, "Process suspended by mandatory locking - not implemented yet\n");
2042 	if (flock->c.flc_flags & FL_LEASE)
2043 		cifs_dbg(FYI, "Lease on file - not implemented yet\n");
2044 	if (flock->c.flc_flags &
2045 	    (~(FL_POSIX | FL_FLOCK | FL_SLEEP |
2046 	       FL_ACCESS | FL_LEASE | FL_CLOSE | FL_OFDLCK)))
2047 		cifs_dbg(FYI, "Unknown lock flags 0x%x\n",
2048 		         flock->c.flc_flags);
2049 
2050 	*type = server->vals->large_lock_type;
2051 	if (lock_is_write(flock)) {
2052 		cifs_dbg(FYI, "F_WRLCK\n");
2053 		*type |= server->vals->exclusive_lock_type;
2054 		*lock = 1;
2055 	} else if (lock_is_unlock(flock)) {
2056 		cifs_dbg(FYI, "F_UNLCK\n");
2057 		*type |= server->vals->unlock_lock_type;
2058 		*unlock = 1;
2059 		/* Check if unlock includes more than one lock range */
2060 	} else if (lock_is_read(flock)) {
2061 		cifs_dbg(FYI, "F_RDLCK\n");
2062 		*type |= server->vals->shared_lock_type;
2063 		*lock = 1;
2064 	} else if (flock->c.flc_type == F_EXLCK) {
2065 		cifs_dbg(FYI, "F_EXLCK\n");
2066 		*type |= server->vals->exclusive_lock_type;
2067 		*lock = 1;
2068 	} else if (flock->c.flc_type == F_SHLCK) {
2069 		cifs_dbg(FYI, "F_SHLCK\n");
2070 		*type |= server->vals->shared_lock_type;
2071 		*lock = 1;
2072 	} else
2073 		cifs_dbg(FYI, "Unknown type of lock\n");
2074 }
2075 
2076 static int
cifs_getlk(struct file * file,struct file_lock * flock,__u32 type,bool wait_flag,bool posix_lck,unsigned int xid)2077 cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
2078 	   bool wait_flag, bool posix_lck, unsigned int xid)
2079 {
2080 	int rc = 0;
2081 	__u64 length = cifs_flock_len(flock);
2082 	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
2083 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2084 	struct TCP_Server_Info *server = tcon->ses->server;
2085 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
2086 	__u16 netfid = cfile->fid.netfid;
2087 
2088 	if (posix_lck) {
2089 		int posix_lock_type;
2090 
2091 		rc = cifs_posix_lock_test(file, flock);
2092 		if (!rc)
2093 			return rc;
2094 
2095 		if (type & server->vals->shared_lock_type)
2096 			posix_lock_type = CIFS_RDLCK;
2097 		else
2098 			posix_lock_type = CIFS_WRLCK;
2099 		rc = CIFSSMBPosixLock(xid, tcon, netfid,
2100 				      hash_lockowner(flock->c.flc_owner),
2101 				      flock->fl_start, length, flock,
2102 				      posix_lock_type, wait_flag);
2103 		return rc;
2104 	}
2105 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
2106 
2107 	rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
2108 	if (!rc)
2109 		return rc;
2110 
2111 	/* BB we could chain these into one lock request BB */
2112 	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type,
2113 				    1, 0, false);
2114 	if (rc == 0) {
2115 		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
2116 					    type, 0, 1, false);
2117 		flock->c.flc_type = F_UNLCK;
2118 		if (rc != 0)
2119 			cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
2120 				 rc);
2121 		return 0;
2122 	}
2123 
2124 	if (type & server->vals->shared_lock_type) {
2125 		flock->c.flc_type = F_WRLCK;
2126 		return 0;
2127 	}
2128 
2129 	type &= ~server->vals->exclusive_lock_type;
2130 
2131 	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
2132 				    type | server->vals->shared_lock_type,
2133 				    1, 0, false);
2134 	if (rc == 0) {
2135 		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
2136 			type | server->vals->shared_lock_type, 0, 1, false);
2137 		flock->c.flc_type = F_RDLCK;
2138 		if (rc != 0)
2139 			cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
2140 				 rc);
2141 	} else
2142 		flock->c.flc_type = F_WRLCK;
2143 
2144 	return 0;
2145 }
2146 
2147 void
cifs_move_llist(struct list_head * source,struct list_head * dest)2148 cifs_move_llist(struct list_head *source, struct list_head *dest)
2149 {
2150 	struct list_head *li, *tmp;
2151 	list_for_each_safe(li, tmp, source)
2152 		list_move(li, dest);
2153 }
2154 
2155 int
cifs_get_hardlink_path(struct cifs_tcon * tcon,struct inode * inode,struct file * file)2156 cifs_get_hardlink_path(struct cifs_tcon *tcon, struct inode *inode,
2157 				struct file *file)
2158 {
2159 	struct cifsFileInfo *open_file = NULL;
2160 	struct cifsInodeInfo *cinode = CIFS_I(inode);
2161 	int rc = 0;
2162 
2163 	spin_lock(&tcon->open_file_lock);
2164 	spin_lock(&cinode->open_file_lock);
2165 
2166 	list_for_each_entry(open_file, &cinode->openFileList, flist) {
2167 		if (file->f_flags == open_file->f_flags) {
2168 			rc = -EINVAL;
2169 			break;
2170 		}
2171 	}
2172 
2173 	spin_unlock(&cinode->open_file_lock);
2174 	spin_unlock(&tcon->open_file_lock);
2175 	return rc;
2176 }
2177 
2178 void
cifs_free_llist(struct list_head * llist)2179 cifs_free_llist(struct list_head *llist)
2180 {
2181 	struct cifsLockInfo *li, *tmp;
2182 	list_for_each_entry_safe(li, tmp, llist, llist) {
2183 		cifs_del_lock_waiters(li);
2184 		list_del(&li->llist);
2185 		kfree(li);
2186 	}
2187 }
2188 
2189 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
2190 int
cifs_unlock_range(struct cifsFileInfo * cfile,struct file_lock * flock,unsigned int xid)2191 cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
2192 		  unsigned int xid)
2193 {
2194 	int rc = 0, stored_rc;
2195 	static const int types[] = {
2196 		LOCKING_ANDX_LARGE_FILES,
2197 		LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
2198 	};
2199 	unsigned int i;
2200 	unsigned int max_num, num, max_buf;
2201 	LOCKING_ANDX_RANGE *buf, *cur;
2202 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2203 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
2204 	struct cifsLockInfo *li, *tmp;
2205 	__u64 length = cifs_flock_len(flock);
2206 	LIST_HEAD(tmp_llist);
2207 
2208 	/*
2209 	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
2210 	 * and check it before using.
2211 	 */
2212 	max_buf = tcon->ses->server->maxBuf;
2213 	if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE)))
2214 		return -EINVAL;
2215 
2216 	BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
2217 		     PAGE_SIZE);
2218 	max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
2219 			PAGE_SIZE);
2220 	max_num = (max_buf - sizeof(struct smb_hdr)) /
2221 						sizeof(LOCKING_ANDX_RANGE);
2222 	buf = kzalloc_objs(LOCKING_ANDX_RANGE, max_num);
2223 	if (!buf)
2224 		return -ENOMEM;
2225 
2226 	cifs_down_write(&cinode->lock_sem);
2227 	for (i = 0; i < 2; i++) {
2228 		cur = buf;
2229 		num = 0;
2230 		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
2231 			if (flock->fl_start > li->offset ||
2232 			    (flock->fl_start + length) <
2233 			    (li->offset + li->length))
2234 				continue;
2235 			if (current->tgid != li->pid)
2236 				continue;
2237 			if (types[i] != li->type)
2238 				continue;
2239 			if (cinode->can_cache_brlcks) {
2240 				/*
2241 				 * We can cache brlock requests - simply remove
2242 				 * a lock from the file's list.
2243 				 */
2244 				list_del(&li->llist);
2245 				cifs_del_lock_waiters(li);
2246 				kfree(li);
2247 				continue;
2248 			}
2249 			cur->Pid = cpu_to_le16(li->pid);
2250 			cur->LengthLow = cpu_to_le32((u32)li->length);
2251 			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
2252 			cur->OffsetLow = cpu_to_le32((u32)li->offset);
2253 			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
2254 			/*
2255 			 * We need to save a lock here to let us add it again to
2256 			 * the file's list if the unlock range request fails on
2257 			 * the server.
2258 			 */
2259 			list_move(&li->llist, &tmp_llist);
2260 			if (++num == max_num) {
2261 				stored_rc = cifs_lockv(xid, tcon,
2262 						       cfile->fid.netfid,
2263 						       li->type, num, 0, buf);
2264 				if (stored_rc) {
2265 					/*
2266 					 * We failed on the unlock range
2267 					 * request - add all locks from the tmp
2268 					 * list to the head of the file's list.
2269 					 */
2270 					cifs_move_llist(&tmp_llist,
2271 							&cfile->llist->locks);
2272 					rc = stored_rc;
2273 				} else
2274 					/*
2275 					 * The unlock range request succeed -
2276 					 * free the tmp list.
2277 					 */
2278 					cifs_free_llist(&tmp_llist);
2279 				cur = buf;
2280 				num = 0;
2281 			} else
2282 				cur++;
2283 		}
2284 		if (num) {
2285 			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
2286 					       types[i], num, 0, buf);
2287 			if (stored_rc) {
2288 				cifs_move_llist(&tmp_llist,
2289 						&cfile->llist->locks);
2290 				rc = stored_rc;
2291 			} else
2292 				cifs_free_llist(&tmp_llist);
2293 		}
2294 	}
2295 
2296 	up_write(&cinode->lock_sem);
2297 	kfree(buf);
2298 	return rc;
2299 }
2300 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
2301 
2302 static int
cifs_setlk(struct file * file,struct file_lock * flock,__u32 type,bool wait_flag,bool posix_lck,int lock,int unlock,unsigned int xid)2303 cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
2304 	   bool wait_flag, bool posix_lck, int lock, int unlock,
2305 	   unsigned int xid)
2306 {
2307 	int rc = 0;
2308 	__u64 length = cifs_flock_len(flock);
2309 	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
2310 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2311 	struct TCP_Server_Info *server = tcon->ses->server;
2312 	struct inode *inode = d_inode(cfile->dentry);
2313 
2314 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
2315 	if (posix_lck) {
2316 		int posix_lock_type;
2317 
2318 		rc = cifs_posix_lock_set(file, flock);
2319 		if (rc <= FILE_LOCK_DEFERRED)
2320 			return rc;
2321 
2322 		if (type & server->vals->shared_lock_type)
2323 			posix_lock_type = CIFS_RDLCK;
2324 		else
2325 			posix_lock_type = CIFS_WRLCK;
2326 
2327 		if (unlock == 1)
2328 			posix_lock_type = CIFS_UNLCK;
2329 
2330 		rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
2331 				      hash_lockowner(flock->c.flc_owner),
2332 				      flock->fl_start, length,
2333 				      NULL, posix_lock_type, wait_flag);
2334 		goto out;
2335 	}
2336 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
2337 	if (lock) {
2338 		struct cifsLockInfo *lock;
2339 
2340 		lock = cifs_lock_init(flock->fl_start, length, type,
2341 				      flock->c.flc_flags);
2342 		if (!lock)
2343 			return -ENOMEM;
2344 
2345 		rc = cifs_lock_add_if(cfile, lock, wait_flag);
2346 		if (rc < 0) {
2347 			kfree(lock);
2348 			return rc;
2349 		}
2350 		if (!rc)
2351 			goto out;
2352 
2353 		/*
2354 		 * Windows 7 server can delay breaking lease from read to None
2355 		 * if we set a byte-range lock on a file - break it explicitly
2356 		 * before sending the lock to the server to be sure the next
2357 		 * read won't conflict with non-overlapted locks due to
2358 		 * pagereading.
2359 		 */
2360 		if (!CIFS_CACHE_WRITE(CIFS_I(inode)) &&
2361 					CIFS_CACHE_READ(CIFS_I(inode))) {
2362 			cifs_zap_mapping(inode);
2363 			cifs_dbg(FYI, "Set no oplock for inode=%p due to mand locks\n",
2364 				 inode);
2365 			cifs_reset_oplock(CIFS_I(inode));
2366 		}
2367 
2368 		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
2369 					    type, 1, 0, wait_flag);
2370 		if (rc) {
2371 			kfree(lock);
2372 			return rc;
2373 		}
2374 
2375 		cifs_lock_add(cfile, lock);
2376 	} else if (unlock)
2377 		rc = server->ops->mand_unlock_range(cfile, flock, xid);
2378 
2379 out:
2380 	if ((flock->c.flc_flags & FL_POSIX) || (flock->c.flc_flags & FL_FLOCK)) {
2381 		/*
2382 		 * If this is a request to remove all locks because we
2383 		 * are closing the file, it doesn't matter if the
2384 		 * unlocking failed as both cifs.ko and the SMB server
2385 		 * remove the lock on file close
2386 		 */
2387 		if (rc) {
2388 			cifs_dbg(VFS, "%s failed rc=%d\n", __func__, rc);
2389 			if (!(flock->c.flc_flags & FL_CLOSE))
2390 				return rc;
2391 		}
2392 		rc = locks_lock_file_wait(file, flock);
2393 	}
2394 	return rc;
2395 }
2396 
cifs_flock(struct file * file,int cmd,struct file_lock * fl)2397 int cifs_flock(struct file *file, int cmd, struct file_lock *fl)
2398 {
2399 	int rc, xid;
2400 	int lock = 0, unlock = 0;
2401 	bool wait_flag = false;
2402 	bool posix_lck = false;
2403 	struct cifs_sb_info *cifs_sb;
2404 	struct cifs_tcon *tcon;
2405 	struct cifsFileInfo *cfile;
2406 	__u32 type;
2407 
2408 	xid = get_xid();
2409 
2410 	if (!(fl->c.flc_flags & FL_FLOCK)) {
2411 		rc = -ENOLCK;
2412 		free_xid(xid);
2413 		return rc;
2414 	}
2415 
2416 	cfile = (struct cifsFileInfo *)file->private_data;
2417 	tcon = tlink_tcon(cfile->tlink);
2418 
2419 	cifs_read_flock(fl, &type, &lock, &unlock, &wait_flag,
2420 			tcon->ses->server);
2421 	cifs_sb = CIFS_SB(file);
2422 
2423 	if (cap_unix(tcon->ses) &&
2424 	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
2425 	    ((cifs_sb_flags(cifs_sb) & CIFS_MOUNT_NOPOSIXBRL) == 0))
2426 		posix_lck = true;
2427 
2428 	if (!lock && !unlock) {
2429 		/*
2430 		 * if no lock or unlock then nothing to do since we do not
2431 		 * know what it is
2432 		 */
2433 		rc = -EOPNOTSUPP;
2434 		free_xid(xid);
2435 		return rc;
2436 	}
2437 
2438 	rc = cifs_setlk(file, fl, type, wait_flag, posix_lck, lock, unlock,
2439 			xid);
2440 	free_xid(xid);
2441 	return rc;
2442 
2443 
2444 }
2445 
cifs_lock(struct file * file,int cmd,struct file_lock * flock)2446 int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
2447 {
2448 	struct cifs_sb_info *cifs_sb = CIFS_SB(file);
2449 	struct cifsFileInfo *cfile;
2450 	int lock = 0, unlock = 0;
2451 	bool wait_flag = false;
2452 	bool posix_lck = false;
2453 	struct cifs_tcon *tcon;
2454 	__u32 type;
2455 	int rc, xid;
2456 
2457 	rc = -EACCES;
2458 	xid = get_xid();
2459 
2460 	cifs_dbg(FYI, "%s: %pD2 cmd=0x%x type=0x%x flags=0x%x r=%lld:%lld\n", __func__, file, cmd,
2461 		 flock->c.flc_flags, flock->c.flc_type,
2462 		 (long long)flock->fl_start,
2463 		 (long long)flock->fl_end);
2464 
2465 	cfile = (struct cifsFileInfo *)file->private_data;
2466 	tcon = tlink_tcon(cfile->tlink);
2467 
2468 	cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
2469 			tcon->ses->server);
2470 	set_bit(CIFS_INO_CLOSE_ON_LOCK, &CIFS_I(d_inode(cfile->dentry))->flags);
2471 
2472 	if (cap_unix(tcon->ses) &&
2473 	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
2474 	    ((cifs_sb_flags(cifs_sb) & CIFS_MOUNT_NOPOSIXBRL) == 0))
2475 		posix_lck = true;
2476 	/*
2477 	 * BB add code here to normalize offset and length to account for
2478 	 * negative length which we can not accept over the wire.
2479 	 */
2480 	if (IS_GETLK(cmd)) {
2481 		rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
2482 		free_xid(xid);
2483 		return rc;
2484 	}
2485 
2486 	if (!lock && !unlock) {
2487 		/*
2488 		 * if no lock or unlock then nothing to do since we do not
2489 		 * know what it is
2490 		 */
2491 		free_xid(xid);
2492 		return -EOPNOTSUPP;
2493 	}
2494 
2495 	rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
2496 			xid);
2497 	free_xid(xid);
2498 	return rc;
2499 }
2500 
cifs_write_subrequest_terminated(struct cifs_io_subrequest * wdata,ssize_t result)2501 void cifs_write_subrequest_terminated(struct cifs_io_subrequest *wdata, ssize_t result)
2502 {
2503 	struct netfs_io_request *wreq = wdata->rreq;
2504 	struct netfs_inode *ictx = netfs_inode(wreq->inode);
2505 	loff_t wrend;
2506 
2507 	if (result > 0) {
2508 		wrend = wdata->subreq.start + wdata->subreq.transferred + result;
2509 
2510 		if (wrend > ictx->zero_point &&
2511 		    (wdata->rreq->origin == NETFS_UNBUFFERED_WRITE ||
2512 		     wdata->rreq->origin == NETFS_DIO_WRITE))
2513 			ictx->zero_point = wrend;
2514 		if (wrend > ictx->remote_i_size)
2515 			netfs_resize_file(ictx, wrend, true);
2516 	}
2517 
2518 	netfs_write_subrequest_terminated(&wdata->subreq, result);
2519 }
2520 
open_flags_match(struct cifsInodeInfo * cinode,unsigned int oflags,unsigned int cflags)2521 static bool open_flags_match(struct cifsInodeInfo *cinode,
2522 			     unsigned int oflags, unsigned int cflags)
2523 {
2524 	struct inode *inode = &cinode->netfs.inode;
2525 	int crw = 0, orw = 0;
2526 
2527 	oflags &= ~(O_CREAT | O_EXCL | O_TRUNC);
2528 	cflags &= ~(O_CREAT | O_EXCL | O_TRUNC);
2529 
2530 	if (cifs_fscache_enabled(inode)) {
2531 		if (OPEN_FMODE(cflags) & FMODE_WRITE)
2532 			crw = 1;
2533 		if (OPEN_FMODE(oflags) & FMODE_WRITE)
2534 			orw = 1;
2535 	}
2536 	if (cifs_convert_flags(oflags, orw) != cifs_convert_flags(cflags, crw))
2537 		return false;
2538 
2539 	return (oflags & (O_SYNC | O_DIRECT)) == (cflags & (O_SYNC | O_DIRECT));
2540 }
2541 
__find_readable_file(struct cifsInodeInfo * cifs_inode,unsigned int find_flags,unsigned int open_flags)2542 struct cifsFileInfo *__find_readable_file(struct cifsInodeInfo *cifs_inode,
2543 					  unsigned int find_flags,
2544 					  unsigned int open_flags)
2545 {
2546 	struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode);
2547 	bool fsuid_only = find_flags & FIND_FSUID_ONLY;
2548 	struct cifsFileInfo *open_file = NULL;
2549 
2550 	/* only filter by fsuid on multiuser mounts */
2551 	if (!(cifs_sb_flags(cifs_sb) & CIFS_MOUNT_MULTIUSER))
2552 		fsuid_only = false;
2553 
2554 	spin_lock(&cifs_inode->open_file_lock);
2555 	/* we could simply get the first_list_entry since write-only entries
2556 	   are always at the end of the list but since the first entry might
2557 	   have a close pending, we go through the whole list */
2558 	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
2559 		if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
2560 			continue;
2561 		if ((find_flags & FIND_NO_PENDING_DELETE) &&
2562 		    open_file->status_file_deleted)
2563 			continue;
2564 		if ((find_flags & FIND_OPEN_FLAGS) &&
2565 		    !open_flags_match(cifs_inode, open_flags,
2566 				      open_file->f_flags))
2567 			continue;
2568 		if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
2569 			if ((!open_file->invalidHandle)) {
2570 				/* found a good file */
2571 				/* lock it so it will not be closed on us */
2572 				cifsFileInfo_get(open_file);
2573 				spin_unlock(&cifs_inode->open_file_lock);
2574 				return open_file;
2575 			} /* else might as well continue, and look for
2576 			     another, or simply have the caller reopen it
2577 			     again rather than trying to fix this handle */
2578 		} else /* write only file */
2579 			break; /* write only files are last so must be done */
2580 	}
2581 	spin_unlock(&cifs_inode->open_file_lock);
2582 	return NULL;
2583 }
2584 
2585 /* Return -EBADF if no handle is found and general rc otherwise */
__cifs_get_writable_file(struct cifsInodeInfo * cifs_inode,unsigned int find_flags,unsigned int open_flags,struct cifsFileInfo ** ret_file)2586 int __cifs_get_writable_file(struct cifsInodeInfo *cifs_inode,
2587 			     unsigned int find_flags, unsigned int open_flags,
2588 			     struct cifsFileInfo **ret_file)
2589 {
2590 	struct cifsFileInfo *open_file, *inv_file = NULL;
2591 	bool fsuid_only, with_delete;
2592 	struct cifs_sb_info *cifs_sb;
2593 	bool any_available = false;
2594 	unsigned int refind = 0;
2595 	*ret_file = NULL;
2596 	int rc = -EBADF;
2597 
2598 	/*
2599 	 * Having a null inode here (because mapping->host was set to zero by
2600 	 * the VFS or MM) should not happen but we had reports of on oops (due
2601 	 * to it being zero) during stress testcases so we need to check for it
2602 	 */
2603 
2604 	if (cifs_inode == NULL) {
2605 		cifs_dbg(VFS, "Null inode passed to cifs_writeable_file\n");
2606 		dump_stack();
2607 		return rc;
2608 	}
2609 
2610 	if (test_bit(CIFS_INO_TMPFILE, &cifs_inode->flags))
2611 		find_flags = FIND_ANY;
2612 
2613 	cifs_sb = CIFS_SB(cifs_inode);
2614 
2615 	with_delete = find_flags & FIND_WITH_DELETE;
2616 	fsuid_only = find_flags & FIND_FSUID_ONLY;
2617 	/* only filter by fsuid on multiuser mounts */
2618 	if (!(cifs_sb_flags(cifs_sb) & CIFS_MOUNT_MULTIUSER))
2619 		fsuid_only = false;
2620 
2621 	spin_lock(&cifs_inode->open_file_lock);
2622 refind_writable:
2623 	if (refind > MAX_REOPEN_ATT) {
2624 		spin_unlock(&cifs_inode->open_file_lock);
2625 		return rc;
2626 	}
2627 	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
2628 		if (!any_available && open_file->pid != current->tgid)
2629 			continue;
2630 		if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
2631 			continue;
2632 		if (with_delete && !(open_file->fid.access & DELETE))
2633 			continue;
2634 		if ((find_flags & FIND_NO_PENDING_DELETE) &&
2635 		    open_file->status_file_deleted)
2636 			continue;
2637 		if ((find_flags & FIND_OPEN_FLAGS) &&
2638 		    !open_flags_match(cifs_inode, open_flags,
2639 				      open_file->f_flags))
2640 			continue;
2641 		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
2642 			if (!open_file->invalidHandle) {
2643 				/* found a good writable file */
2644 				cifsFileInfo_get(open_file);
2645 				spin_unlock(&cifs_inode->open_file_lock);
2646 				*ret_file = open_file;
2647 				return 0;
2648 			} else {
2649 				if (!inv_file)
2650 					inv_file = open_file;
2651 			}
2652 		}
2653 	}
2654 	/* couldn't find usable FH with same pid, try any available */
2655 	if (!any_available) {
2656 		any_available = true;
2657 		goto refind_writable;
2658 	}
2659 
2660 	if (inv_file) {
2661 		any_available = false;
2662 		cifsFileInfo_get(inv_file);
2663 	}
2664 
2665 	spin_unlock(&cifs_inode->open_file_lock);
2666 
2667 	if (inv_file) {
2668 		rc = cifs_reopen_file(inv_file, false);
2669 		if (!rc) {
2670 			*ret_file = inv_file;
2671 			return 0;
2672 		}
2673 
2674 		spin_lock(&cifs_inode->open_file_lock);
2675 		list_move_tail(&inv_file->flist, &cifs_inode->openFileList);
2676 		spin_unlock(&cifs_inode->open_file_lock);
2677 		cifsFileInfo_put(inv_file);
2678 		++refind;
2679 		inv_file = NULL;
2680 		spin_lock(&cifs_inode->open_file_lock);
2681 		goto refind_writable;
2682 	}
2683 
2684 	return rc;
2685 }
2686 
2687 struct cifsFileInfo *
find_writable_file(struct cifsInodeInfo * cifs_inode,int flags)2688 find_writable_file(struct cifsInodeInfo *cifs_inode, int flags)
2689 {
2690 	struct cifsFileInfo *cfile;
2691 	int rc;
2692 
2693 	rc = cifs_get_writable_file(cifs_inode, flags, &cfile);
2694 	if (rc)
2695 		cifs_dbg(FYI, "Couldn't find writable handle rc=%d\n", rc);
2696 
2697 	return cfile;
2698 }
2699 
cifs_get_writable_path(struct cifs_tcon * tcon,const char * name,struct inode * inode,int flags,struct cifsFileInfo ** ret_file)2700 int cifs_get_writable_path(struct cifs_tcon *tcon, const char *name,
2701 			   struct inode *inode, int flags,
2702 			   struct cifsFileInfo **ret_file)
2703 {
2704 	struct cifsFileInfo *cfile;
2705 	void *page;
2706 
2707 	*ret_file = NULL;
2708 
2709 	if (inode)
2710 		return cifs_get_writable_file(CIFS_I(inode), flags, ret_file);
2711 
2712 	page = alloc_dentry_path();
2713 	spin_lock(&tcon->open_file_lock);
2714 	list_for_each_entry(cfile, &tcon->openFileList, tlist) {
2715 		struct cifsInodeInfo *cinode;
2716 		const char *full_path = build_path_from_dentry(cfile->dentry, page);
2717 		if (IS_ERR(full_path)) {
2718 			spin_unlock(&tcon->open_file_lock);
2719 			free_dentry_path(page);
2720 			return PTR_ERR(full_path);
2721 		}
2722 		if (strcmp(full_path, name))
2723 			continue;
2724 
2725 		cinode = CIFS_I(d_inode(cfile->dentry));
2726 		spin_unlock(&tcon->open_file_lock);
2727 		free_dentry_path(page);
2728 		return cifs_get_writable_file(cinode, flags, ret_file);
2729 	}
2730 
2731 	spin_unlock(&tcon->open_file_lock);
2732 	free_dentry_path(page);
2733 	return -ENOENT;
2734 }
2735 
2736 int
cifs_get_readable_path(struct cifs_tcon * tcon,const char * name,struct cifsFileInfo ** ret_file)2737 cifs_get_readable_path(struct cifs_tcon *tcon, const char *name,
2738 		       struct cifsFileInfo **ret_file)
2739 {
2740 	struct cifsFileInfo *cfile;
2741 	void *page = alloc_dentry_path();
2742 
2743 	*ret_file = NULL;
2744 
2745 	spin_lock(&tcon->open_file_lock);
2746 	list_for_each_entry(cfile, &tcon->openFileList, tlist) {
2747 		struct cifsInodeInfo *cinode;
2748 		const char *full_path = build_path_from_dentry(cfile->dentry, page);
2749 		if (IS_ERR(full_path)) {
2750 			spin_unlock(&tcon->open_file_lock);
2751 			free_dentry_path(page);
2752 			return PTR_ERR(full_path);
2753 		}
2754 		if (strcmp(full_path, name))
2755 			continue;
2756 
2757 		cinode = CIFS_I(d_inode(cfile->dentry));
2758 		spin_unlock(&tcon->open_file_lock);
2759 		free_dentry_path(page);
2760 		*ret_file = find_readable_file(cinode, FIND_ANY);
2761 		return *ret_file ? 0 : -ENOENT;
2762 	}
2763 
2764 	spin_unlock(&tcon->open_file_lock);
2765 	free_dentry_path(page);
2766 	return -ENOENT;
2767 }
2768 
2769 /*
2770  * Flush data on a strict file.
2771  */
cifs_strict_fsync(struct file * file,loff_t start,loff_t end,int datasync)2772 int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
2773 		      int datasync)
2774 {
2775 	struct cifsFileInfo *smbfile = file->private_data;
2776 	struct inode *inode = file_inode(file);
2777 	unsigned int xid;
2778 	int rc;
2779 
2780 	rc = file_write_and_wait_range(file, start, end);
2781 	if (rc) {
2782 		trace_cifs_fsync_err(inode->i_ino, rc);
2783 		return rc;
2784 	}
2785 
2786 	cifs_dbg(FYI, "%s: name=%pD datasync=0x%x\n", __func__, file, datasync);
2787 
2788 	if (!CIFS_CACHE_READ(CIFS_I(inode))) {
2789 		rc = cifs_zap_mapping(inode);
2790 		cifs_dbg(FYI, "%s: invalidate mapping: rc = %d\n", __func__, rc);
2791 	}
2792 
2793 	xid = get_xid();
2794 	rc = cifs_file_flush(xid, inode, smbfile);
2795 	free_xid(xid);
2796 	return rc;
2797 }
2798 
2799 /*
2800  * Flush data on a non-strict data.
2801  */
cifs_fsync(struct file * file,loff_t start,loff_t end,int datasync)2802 int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
2803 {
2804 	unsigned int xid;
2805 	int rc = 0;
2806 	struct cifs_tcon *tcon;
2807 	struct TCP_Server_Info *server;
2808 	struct cifsFileInfo *smbfile = file->private_data;
2809 	struct inode *inode = file_inode(file);
2810 	struct cifs_sb_info *cifs_sb = CIFS_SB(file);
2811 
2812 	rc = file_write_and_wait_range(file, start, end);
2813 	if (rc) {
2814 		trace_cifs_fsync_err(file_inode(file)->i_ino, rc);
2815 		return rc;
2816 	}
2817 
2818 	xid = get_xid();
2819 
2820 	cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
2821 		 file, datasync);
2822 
2823 	tcon = tlink_tcon(smbfile->tlink);
2824 	if (!(cifs_sb_flags(cifs_sb) & CIFS_MOUNT_NOSSYNC)) {
2825 		server = tcon->ses->server;
2826 		if (server->ops->flush == NULL) {
2827 			rc = -ENOSYS;
2828 			goto fsync_exit;
2829 		}
2830 
2831 		if ((OPEN_FMODE(smbfile->f_flags) & FMODE_WRITE) == 0) {
2832 			smbfile = find_writable_file(CIFS_I(inode), FIND_ANY);
2833 			if (smbfile) {
2834 				rc = server->ops->flush(xid, tcon, &smbfile->fid);
2835 				cifsFileInfo_put(smbfile);
2836 			} else
2837 				cifs_dbg(FYI, "ignore fsync for file not open for write\n");
2838 		} else
2839 			rc = server->ops->flush(xid, tcon, &smbfile->fid);
2840 	}
2841 
2842 fsync_exit:
2843 	free_xid(xid);
2844 	return rc;
2845 }
2846 
2847 /*
2848  * As file closes, flush all cached write data for this inode checking
2849  * for write behind errors.
2850  */
cifs_flush(struct file * file,fl_owner_t id)2851 int cifs_flush(struct file *file, fl_owner_t id)
2852 {
2853 	struct inode *inode = file_inode(file);
2854 	int rc = 0;
2855 
2856 	if (file->f_mode & FMODE_WRITE)
2857 		rc = filemap_write_and_wait(inode->i_mapping);
2858 
2859 	cifs_dbg(FYI, "Flush inode %p file %p rc %d\n", inode, file, rc);
2860 	if (rc) {
2861 		/* get more nuanced writeback errors */
2862 		rc = filemap_check_wb_err(file->f_mapping, 0);
2863 		trace_cifs_flush_err(inode->i_ino, rc);
2864 	}
2865 	return rc;
2866 }
2867 
2868 static ssize_t
cifs_writev(struct kiocb * iocb,struct iov_iter * from)2869 cifs_writev(struct kiocb *iocb, struct iov_iter *from)
2870 {
2871 	struct file *file = iocb->ki_filp;
2872 	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
2873 	struct inode *inode = file->f_mapping->host;
2874 	struct cifsInodeInfo *cinode = CIFS_I(inode);
2875 	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
2876 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode);
2877 	ssize_t rc;
2878 
2879 	rc = netfs_start_io_write(inode);
2880 	if (rc < 0)
2881 		return rc;
2882 
2883 	/*
2884 	 * We need to hold the sem to be sure nobody modifies lock list
2885 	 * with a brlock that prevents writing.
2886 	 */
2887 	down_read(&cinode->lock_sem);
2888 
2889 	rc = generic_write_checks(iocb, from);
2890 	if (rc <= 0)
2891 		goto out;
2892 
2893 	if ((cifs_sb_flags(cifs_sb) & CIFS_MOUNT_NOPOSIXBRL) &&
2894 	    (cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(from),
2895 				     server->vals->exclusive_lock_type, 0,
2896 				     NULL, CIFS_WRITE_OP))) {
2897 		rc = -EACCES;
2898 		goto out;
2899 	}
2900 
2901 	rc = netfs_buffered_write_iter_locked(iocb, from, NULL);
2902 
2903 out:
2904 	up_read(&cinode->lock_sem);
2905 	netfs_end_io_write(inode);
2906 	if (rc > 0)
2907 		rc = generic_write_sync(iocb, rc);
2908 	return rc;
2909 }
2910 
2911 ssize_t
cifs_strict_writev(struct kiocb * iocb,struct iov_iter * from)2912 cifs_strict_writev(struct kiocb *iocb, struct iov_iter *from)
2913 {
2914 	struct inode *inode = file_inode(iocb->ki_filp);
2915 	struct cifsInodeInfo *cinode = CIFS_I(inode);
2916 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode);
2917 	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
2918 						iocb->ki_filp->private_data;
2919 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2920 	ssize_t written;
2921 
2922 	written = cifs_get_writer(cinode);
2923 	if (written)
2924 		return written;
2925 
2926 	if (CIFS_CACHE_WRITE(cinode)) {
2927 		if (cap_unix(tcon->ses) &&
2928 		    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
2929 		    ((cifs_sb_flags(cifs_sb) & CIFS_MOUNT_NOPOSIXBRL) == 0)) {
2930 			written = netfs_file_write_iter(iocb, from);
2931 			goto out;
2932 		}
2933 		written = cifs_writev(iocb, from);
2934 		goto out;
2935 	}
2936 	/*
2937 	 * For non-oplocked files in strict cache mode we need to write the data
2938 	 * to the server exactly from the pos to pos+len-1 rather than flush all
2939 	 * affected pages because it may cause a error with mandatory locks on
2940 	 * these pages but not on the region from pos to ppos+len-1.
2941 	 */
2942 	written = netfs_file_write_iter(iocb, from);
2943 	if (CIFS_CACHE_READ(cinode)) {
2944 		/*
2945 		 * We have read level caching and we have just sent a write
2946 		 * request to the server thus making data in the cache stale.
2947 		 * Zap the cache and set oplock/lease level to NONE to avoid
2948 		 * reading stale data from the cache. All subsequent read
2949 		 * operations will read new data from the server.
2950 		 */
2951 		cifs_zap_mapping(inode);
2952 		cifs_dbg(FYI, "Set Oplock/Lease to NONE for inode=%p after write\n",
2953 			 inode);
2954 		cifs_reset_oplock(cinode);
2955 	}
2956 out:
2957 	cifs_put_writer(cinode);
2958 	return written;
2959 }
2960 
cifs_loose_read_iter(struct kiocb * iocb,struct iov_iter * iter)2961 ssize_t cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter)
2962 {
2963 	ssize_t rc;
2964 	struct inode *inode = file_inode(iocb->ki_filp);
2965 
2966 	if (iocb->ki_flags & IOCB_DIRECT)
2967 		return netfs_unbuffered_read_iter(iocb, iter);
2968 
2969 	rc = cifs_revalidate_mapping(inode);
2970 	if (rc)
2971 		return rc;
2972 
2973 	return netfs_file_read_iter(iocb, iter);
2974 }
2975 
cifs_file_write_iter(struct kiocb * iocb,struct iov_iter * from)2976 ssize_t cifs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
2977 {
2978 	struct inode *inode = file_inode(iocb->ki_filp);
2979 	struct cifsInodeInfo *cinode = CIFS_I(inode);
2980 	ssize_t written;
2981 	int rc;
2982 
2983 	if (iocb->ki_filp->f_flags & O_DIRECT) {
2984 		written = netfs_unbuffered_write_iter(iocb, from);
2985 		if (written > 0 && CIFS_CACHE_READ(cinode)) {
2986 			cifs_zap_mapping(inode);
2987 			cifs_dbg(FYI,
2988 				 "Set no oplock for inode=%p after a write operation\n",
2989 				 inode);
2990 			cifs_reset_oplock(cinode);
2991 		}
2992 		return written;
2993 	}
2994 
2995 	written = cifs_get_writer(cinode);
2996 	if (written)
2997 		return written;
2998 
2999 	written = netfs_file_write_iter(iocb, from);
3000 
3001 	if (!CIFS_CACHE_WRITE(CIFS_I(inode))) {
3002 		rc = filemap_fdatawrite(inode->i_mapping);
3003 		if (rc)
3004 			cifs_dbg(FYI, "cifs_file_write_iter: %d rc on %p inode\n",
3005 				 rc, inode);
3006 	}
3007 
3008 	cifs_put_writer(cinode);
3009 	return written;
3010 }
3011 
3012 ssize_t
cifs_strict_readv(struct kiocb * iocb,struct iov_iter * to)3013 cifs_strict_readv(struct kiocb *iocb, struct iov_iter *to)
3014 {
3015 	struct inode *inode = file_inode(iocb->ki_filp);
3016 	struct cifsInodeInfo *cinode = CIFS_I(inode);
3017 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode);
3018 	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
3019 						iocb->ki_filp->private_data;
3020 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
3021 	int rc = -EACCES;
3022 
3023 	/*
3024 	 * In strict cache mode we need to read from the server all the time
3025 	 * if we don't have level II oplock because the server can delay mtime
3026 	 * change - so we can't make a decision about inode invalidating.
3027 	 * And we can also fail with pagereading if there are mandatory locks
3028 	 * on pages affected by this read but not on the region from pos to
3029 	 * pos+len-1.
3030 	 */
3031 	if (!CIFS_CACHE_READ(cinode))
3032 		return netfs_unbuffered_read_iter(iocb, to);
3033 
3034 	if ((cifs_sb_flags(cifs_sb) & CIFS_MOUNT_NOPOSIXBRL) == 0) {
3035 		if (iocb->ki_flags & IOCB_DIRECT)
3036 			return netfs_unbuffered_read_iter(iocb, to);
3037 		return netfs_buffered_read_iter(iocb, to);
3038 	}
3039 
3040 	/*
3041 	 * We need to hold the sem to be sure nobody modifies lock list
3042 	 * with a brlock that prevents reading.
3043 	 */
3044 	if (iocb->ki_flags & IOCB_DIRECT) {
3045 		rc = netfs_start_io_direct(inode);
3046 		if (rc < 0)
3047 			goto out;
3048 		rc = -EACCES;
3049 		down_read(&cinode->lock_sem);
3050 		if (!cifs_find_lock_conflict(
3051 			    cfile, iocb->ki_pos, iov_iter_count(to),
3052 			    tcon->ses->server->vals->shared_lock_type,
3053 			    0, NULL, CIFS_READ_OP))
3054 			rc = netfs_unbuffered_read_iter_locked(iocb, to);
3055 		up_read(&cinode->lock_sem);
3056 		netfs_end_io_direct(inode);
3057 	} else {
3058 		rc = netfs_start_io_read(inode);
3059 		if (rc < 0)
3060 			goto out;
3061 		rc = -EACCES;
3062 		down_read(&cinode->lock_sem);
3063 		if (!cifs_find_lock_conflict(
3064 			    cfile, iocb->ki_pos, iov_iter_count(to),
3065 			    tcon->ses->server->vals->shared_lock_type,
3066 			    0, NULL, CIFS_READ_OP))
3067 			rc = filemap_read(iocb, to, 0);
3068 		up_read(&cinode->lock_sem);
3069 		netfs_end_io_read(inode);
3070 	}
3071 out:
3072 	return rc;
3073 }
3074 
cifs_page_mkwrite(struct vm_fault * vmf)3075 static vm_fault_t cifs_page_mkwrite(struct vm_fault *vmf)
3076 {
3077 	return netfs_page_mkwrite(vmf, NULL);
3078 }
3079 
3080 static const struct vm_operations_struct cifs_file_vm_ops = {
3081 	.fault = filemap_fault,
3082 	.map_pages = filemap_map_pages,
3083 	.page_mkwrite = cifs_page_mkwrite,
3084 };
3085 
cifs_file_strict_mmap_prepare(struct vm_area_desc * desc)3086 int cifs_file_strict_mmap_prepare(struct vm_area_desc *desc)
3087 {
3088 	int xid, rc = 0;
3089 	struct inode *inode = file_inode(desc->file);
3090 
3091 	xid = get_xid();
3092 
3093 	if (!CIFS_CACHE_READ(CIFS_I(inode)))
3094 		rc = cifs_zap_mapping(inode);
3095 	if (!rc)
3096 		rc = generic_file_mmap_prepare(desc);
3097 	if (!rc)
3098 		desc->vm_ops = &cifs_file_vm_ops;
3099 
3100 	free_xid(xid);
3101 	return rc;
3102 }
3103 
cifs_file_mmap_prepare(struct vm_area_desc * desc)3104 int cifs_file_mmap_prepare(struct vm_area_desc *desc)
3105 {
3106 	int rc, xid;
3107 
3108 	xid = get_xid();
3109 
3110 	rc = cifs_revalidate_file(desc->file);
3111 	if (rc)
3112 		cifs_dbg(FYI, "Validation prior to mmap failed, error=%d\n",
3113 			 rc);
3114 	if (!rc)
3115 		rc = generic_file_mmap_prepare(desc);
3116 	if (!rc)
3117 		desc->vm_ops = &cifs_file_vm_ops;
3118 
3119 	free_xid(xid);
3120 	return rc;
3121 }
3122 
is_inode_writable(struct cifsInodeInfo * cifs_inode)3123 static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
3124 {
3125 	struct cifsFileInfo *open_file;
3126 
3127 	spin_lock(&cifs_inode->open_file_lock);
3128 	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
3129 		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
3130 			spin_unlock(&cifs_inode->open_file_lock);
3131 			return 1;
3132 		}
3133 	}
3134 	spin_unlock(&cifs_inode->open_file_lock);
3135 	return 0;
3136 }
3137 
3138 /* We do not want to update the file size from server for inodes
3139    open for write - to avoid races with writepage extending
3140    the file - in the future we could consider allowing
3141    refreshing the inode only on increases in the file size
3142    but this is tricky to do without racing with writebehind
3143    page caching in the current Linux kernel design */
is_size_safe_to_change(struct cifsInodeInfo * cifsInode,__u64 end_of_file,bool from_readdir)3144 bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file,
3145 			    bool from_readdir)
3146 {
3147 	if (!cifsInode)
3148 		return true;
3149 
3150 	if (is_inode_writable(cifsInode) ||
3151 		((cifsInode->oplock & CIFS_CACHE_RW_FLG) != 0 && from_readdir)) {
3152 		/* This inode is open for write at least once */
3153 		struct cifs_sb_info *cifs_sb = CIFS_SB(cifsInode);
3154 
3155 		if (cifs_sb_flags(cifs_sb) & CIFS_MOUNT_DIRECT_IO) {
3156 			/* since no page cache to corrupt on directio
3157 			we can change size safely */
3158 			return true;
3159 		}
3160 
3161 		if (i_size_read(&cifsInode->netfs.inode) < end_of_file)
3162 			return true;
3163 
3164 		return false;
3165 	} else
3166 		return true;
3167 }
3168 
cifs_oplock_break(struct work_struct * work)3169 void cifs_oplock_break(struct work_struct *work)
3170 {
3171 	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
3172 						  oplock_break);
3173 	struct inode *inode = d_inode(cfile->dentry);
3174 	struct super_block *sb = inode->i_sb;
3175 	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
3176 	struct cifsInodeInfo *cinode = CIFS_I(inode);
3177 	bool cache_read, cache_write, cache_handle;
3178 	struct cifs_tcon *tcon;
3179 	struct TCP_Server_Info *server;
3180 	struct tcon_link *tlink;
3181 	unsigned int oplock;
3182 	int rc = 0;
3183 	bool purge_cache = false, oplock_break_cancelled;
3184 	__u64 persistent_fid, volatile_fid;
3185 	__u16 net_fid;
3186 
3187 	wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
3188 			TASK_UNINTERRUPTIBLE);
3189 
3190 	tlink = cifs_sb_tlink(cifs_sb);
3191 	if (IS_ERR(tlink))
3192 		goto out;
3193 	tcon = tlink_tcon(tlink);
3194 	server = tcon->ses->server;
3195 
3196 	scoped_guard(spinlock, &cinode->open_file_lock) {
3197 		unsigned int sbflags = cifs_sb_flags(cifs_sb);
3198 
3199 		server->ops->downgrade_oplock(server, cinode, cfile->oplock_level,
3200 					      cfile->oplock_epoch, &purge_cache);
3201 		oplock = READ_ONCE(cinode->oplock);
3202 		cache_read = (oplock & CIFS_CACHE_READ_FLG) ||
3203 			(sbflags & CIFS_MOUNT_RO_CACHE);
3204 		cache_write = (oplock & CIFS_CACHE_WRITE_FLG) ||
3205 			(sbflags & CIFS_MOUNT_RW_CACHE);
3206 		cache_handle = oplock & CIFS_CACHE_HANDLE_FLG;
3207 	}
3208 
3209 	if (!cache_write && cache_read && cifs_has_mand_locks(cinode)) {
3210 		cifs_dbg(FYI, "Reset oplock to None for inode=%p due to mand locks\n",
3211 			 inode);
3212 		cifs_reset_oplock(cinode);
3213 		oplock = 0;
3214 		cache_read = cache_write = cache_handle = false;
3215 	}
3216 
3217 	if (S_ISREG(inode->i_mode)) {
3218 		if (cache_read)
3219 			break_lease(inode, O_RDONLY);
3220 		else
3221 			break_lease(inode, O_WRONLY);
3222 		rc = filemap_fdatawrite(inode->i_mapping);
3223 		if (!cache_read || purge_cache) {
3224 			rc = filemap_fdatawait(inode->i_mapping);
3225 			mapping_set_error(inode->i_mapping, rc);
3226 			cifs_zap_mapping(inode);
3227 		}
3228 		cifs_dbg(FYI, "Oplock flush inode %p rc %d\n", inode, rc);
3229 		if (cache_write)
3230 			goto oplock_break_ack;
3231 	}
3232 
3233 	rc = cifs_push_locks(cfile);
3234 	if (rc)
3235 		cifs_dbg(VFS, "Push locks rc = %d\n", rc);
3236 
3237 oplock_break_ack:
3238 	/*
3239 	 * When oplock break is received and there are no active
3240 	 * file handles but cached, then schedule deferred close immediately.
3241 	 * So, new open will not use cached handle.
3242 	 */
3243 
3244 	if (!cache_handle && !list_empty(&cinode->deferred_closes))
3245 		cifs_close_deferred_file(cinode);
3246 
3247 	persistent_fid = cfile->fid.persistent_fid;
3248 	volatile_fid = cfile->fid.volatile_fid;
3249 	net_fid = cfile->fid.netfid;
3250 	oplock_break_cancelled = cfile->oplock_break_cancelled;
3251 
3252 	_cifsFileInfo_put(cfile, false /* do not wait for ourself */, false);
3253 	/*
3254 	 * MS-SMB2 3.2.5.19.1 and 3.2.5.19.2 (and MS-CIFS 3.2.5.42) do not require
3255 	 * an acknowledgment to be sent when the file has already been closed.
3256 	 */
3257 	spin_lock(&cinode->open_file_lock);
3258 	/* check list empty since can race with kill_sb calling tree disconnect */
3259 	if (!oplock_break_cancelled && !list_empty(&cinode->openFileList)) {
3260 		spin_unlock(&cinode->open_file_lock);
3261 		rc = server->ops->oplock_response(tcon, persistent_fid,
3262 						  volatile_fid, net_fid,
3263 						  cinode, oplock);
3264 		cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
3265 	} else
3266 		spin_unlock(&cinode->open_file_lock);
3267 
3268 	cifs_put_tlink(tlink);
3269 out:
3270 	cifs_done_oplock_break(cinode);
3271 }
3272 
cifs_swap_activate(struct swap_info_struct * sis,struct file * swap_file,sector_t * span)3273 static int cifs_swap_activate(struct swap_info_struct *sis,
3274 			      struct file *swap_file, sector_t *span)
3275 {
3276 	struct cifsFileInfo *cfile = swap_file->private_data;
3277 	struct inode *inode = swap_file->f_mapping->host;
3278 	unsigned long blocks;
3279 	long long isize;
3280 
3281 	cifs_dbg(FYI, "swap activate\n");
3282 
3283 	if (!swap_file->f_mapping->a_ops->swap_rw)
3284 		/* Cannot support swap */
3285 		return -EINVAL;
3286 
3287 	spin_lock(&inode->i_lock);
3288 	blocks = inode->i_blocks;
3289 	isize = inode->i_size;
3290 	spin_unlock(&inode->i_lock);
3291 	if (blocks*512 < isize) {
3292 		pr_warn("swap activate: swapfile has holes\n");
3293 		return -EINVAL;
3294 	}
3295 	*span = sis->pages;
3296 
3297 	pr_warn_once("Swap support over SMB3 is experimental\n");
3298 
3299 	/*
3300 	 * TODO: consider adding ACL (or documenting how) to prevent other
3301 	 * users (on this or other systems) from reading it
3302 	 */
3303 
3304 
3305 	/* TODO: add sk_set_memalloc(inet) or similar */
3306 
3307 	if (cfile)
3308 		cfile->swapfile = true;
3309 	/*
3310 	 * TODO: Since file already open, we can't open with DENY_ALL here
3311 	 * but we could add call to grab a byte range lock to prevent others
3312 	 * from reading or writing the file
3313 	 */
3314 
3315 	sis->flags |= SWP_FS_OPS;
3316 	return add_swap_extent(sis, 0, sis->max, 0);
3317 }
3318 
cifs_swap_deactivate(struct file * file)3319 static void cifs_swap_deactivate(struct file *file)
3320 {
3321 	struct cifsFileInfo *cfile = file->private_data;
3322 
3323 	cifs_dbg(FYI, "swap deactivate\n");
3324 
3325 	/* TODO: undo sk_set_memalloc(inet) will eventually be needed */
3326 
3327 	if (cfile)
3328 		cfile->swapfile = false;
3329 
3330 	/* do we need to unpin (or unlock) the file */
3331 }
3332 
3333 /**
3334  * cifs_swap_rw - SMB3 address space operation for swap I/O
3335  * @iocb: target I/O control block
3336  * @iter: I/O buffer
3337  *
3338  * Perform IO to the swap-file.  This is much like direct IO.
3339  */
cifs_swap_rw(struct kiocb * iocb,struct iov_iter * iter)3340 static int cifs_swap_rw(struct kiocb *iocb, struct iov_iter *iter)
3341 {
3342 	ssize_t ret;
3343 
3344 	if (iov_iter_rw(iter) == READ)
3345 		ret = netfs_unbuffered_read_iter_locked(iocb, iter);
3346 	else
3347 		ret = netfs_unbuffered_write_iter_locked(iocb, iter, NULL);
3348 	if (ret < 0)
3349 		return ret;
3350 	return 0;
3351 }
3352 
3353 const struct address_space_operations cifs_addr_ops = {
3354 	.read_folio	= netfs_read_folio,
3355 	.readahead	= netfs_readahead,
3356 	.writepages	= netfs_writepages,
3357 	.dirty_folio	= netfs_dirty_folio,
3358 	.release_folio	= netfs_release_folio,
3359 	.direct_IO	= noop_direct_IO,
3360 	.invalidate_folio = netfs_invalidate_folio,
3361 	.migrate_folio	= filemap_migrate_folio,
3362 	/*
3363 	 * TODO: investigate and if useful we could add an is_dirty_writeback
3364 	 * helper if needed
3365 	 */
3366 	.swap_activate	= cifs_swap_activate,
3367 	.swap_deactivate = cifs_swap_deactivate,
3368 	.swap_rw = cifs_swap_rw,
3369 };
3370 
3371 /*
3372  * cifs_readahead requires the server to support a buffer large enough to
3373  * contain the header plus one complete page of data.  Otherwise, we need
3374  * to leave cifs_readahead out of the address space operations.
3375  */
3376 const struct address_space_operations cifs_addr_ops_smallbuf = {
3377 	.read_folio	= netfs_read_folio,
3378 	.writepages	= netfs_writepages,
3379 	.dirty_folio	= netfs_dirty_folio,
3380 	.release_folio	= netfs_release_folio,
3381 	.invalidate_folio = netfs_invalidate_folio,
3382 	.migrate_folio	= filemap_migrate_folio,
3383 };
3384