/* xref: /linux/fs/smb/client/cifsfs.c (revision 81dc1e4d32b064ac47abc60b0acbf49b66a34d52) */
1 // SPDX-License-Identifier: LGPL-2.1
2 /*
3  *
4  *   Copyright (C) International Business Machines  Corp., 2002,2008
5  *   Author(s): Steve French (sfrench@us.ibm.com)
6  *
7  *   Common Internet FileSystem (CIFS) client
8  *
9  */
10 
11 /* Note that BB means BUGBUG (ie something to fix eventually) */
12 
13 #include <linux/module.h>
14 #include <linux/fs.h>
15 #include <linux/filelock.h>
16 #include <linux/mount.h>
17 #include <linux/slab.h>
18 #include <linux/init.h>
19 #include <linux/list.h>
20 #include <linux/seq_file.h>
21 #include <linux/vfs.h>
22 #include <linux/mempool.h>
23 #include <linux/delay.h>
24 #include <linux/kthread.h>
25 #include <linux/freezer.h>
26 #include <linux/namei.h>
27 #include <linux/random.h>
28 #include <linux/splice.h>
29 #include <linux/uuid.h>
30 #include <linux/xattr.h>
31 #include <linux/mm.h>
32 #include <linux/key-type.h>
33 #include <uapi/linux/magic.h>
34 #include <net/ipv6.h>
35 #include "cifsfs.h"
36 #define DECLARE_GLOBALS_HERE
37 #include "cifsglob.h"
38 #include "cifsproto.h"
39 #include "smb2proto.h"
40 #include "cifs_debug.h"
41 #include "cifs_fs_sb.h"
42 #include "cifs_spnego.h"
43 #include "fscache.h"
44 #ifdef CONFIG_CIFS_DFS_UPCALL
45 #include "dfs_cache.h"
46 #endif
47 #ifdef CONFIG_CIFS_SWN_UPCALL
48 #include "netlink.h"
49 #endif
50 #include "fs_context.h"
51 #include "cached_dir.h"
52 
53 /*
54  * DOS dates from 1980/1/1 through 2107/12/31
55  * Protocol specifications indicate the range should be to 119, which
56  * limits maximum year to 2099. But this range has not been checked.
57  */
58 #define SMB_DATE_MAX (127<<9 | 12<<5 | 31)
59 #define SMB_DATE_MIN (0<<9 | 1<<5 | 1)
60 #define SMB_TIME_MAX (23<<11 | 59<<5 | 29)
61 
/* Debug verbosity and feature switches; several are exported as module params below */
int cifsFYI = 0;
bool traceSMB;
bool enable_oplocks = true;
bool linuxExtEnabled = true;
bool lookupCacheEnabled = true;
bool disable_legacy_dialects; /* false by default */
bool enable_gcm_256 = true;
bool require_gcm_256; /* false by default */
bool enable_negotiate_signing; /* false by default */
unsigned int global_secflags = CIFSSEC_DEF;
/* unsigned int ntlmv2_support = 0; */

/*
 * Global transaction id (XID) information
 */
unsigned int GlobalCurrentXid;	/* protected by GlobalMid_Lock */
unsigned int GlobalTotalActiveXid; /* prot by GlobalMid_Lock */
unsigned int GlobalMaxActiveXid;	/* prot by GlobalMid_Lock */
DEFINE_SPINLOCK(GlobalMid_Lock); /* protects above & list operations on midQ entries */

/*
 *  Global counters, updated atomically
 */
atomic_t sesInfoAllocCount;
atomic_t tconInfoAllocCount;
atomic_t tcpSesNextId;
atomic_t tcpSesAllocCount;
atomic_t tcpSesReconnectCount;
atomic_t tconInfoReconnectCount;

atomic_t mid_count;
atomic_t buf_alloc_count;
atomic_t small_buf_alloc_count;
#ifdef CONFIG_CIFS_STATS2
atomic_t total_buf_alloc_count;
atomic_t total_small_buf_alloc_count;
#endif/* STATS2 */
/* List of all TCP (server) sessions; guarded by cifs_tcp_ses_lock */
struct list_head	cifs_tcp_ses_list;
DEFINE_SPINLOCK(cifs_tcp_ses_lock);
static const struct super_operations cifs_super_ops;
unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
module_param(CIFSMaxBufSize, uint, 0444);
MODULE_PARM_DESC(CIFSMaxBufSize, "Network buffer size (not including header) "
				 "for CIFS requests. "
				 "Default: 16384 Range: 8192 to 130048");
unsigned int cifs_min_rcv = CIFS_MIN_RCV_POOL;
module_param(cifs_min_rcv, uint, 0444);
MODULE_PARM_DESC(cifs_min_rcv, "Network buffers in pool. Default: 4 Range: "
				"1 to 64");
unsigned int cifs_min_small = 30;
module_param(cifs_min_small, uint, 0444);
MODULE_PARM_DESC(cifs_min_small, "Small network buffers in pool. Default: 30 "
				 "Range: 2 to 256");
unsigned int cifs_max_pending = CIFS_MAX_REQ;
module_param(cifs_max_pending, uint, 0444);
MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server for "
				   "CIFS/SMB1 dialect (N/A for SMB3) "
				   "Default: 32767 Range: 2 to 32767.");
unsigned int dir_cache_timeout = 30;
module_param(dir_cache_timeout, uint, 0644);
MODULE_PARM_DESC(dir_cache_timeout, "Number of seconds to cache directory contents for which we have a lease. Default: 30 "
				 "Range: 1 to 65000 seconds, 0 to disable caching dir contents");
/* Module-wide total cached dirents (in bytes) across all tcons */
atomic64_t cifs_dircache_bytes_used = ATOMIC64_INIT(0);

/* Counters used to generate unique temporary/sillyrename file names */
atomic_t cifs_sillycounter;
atomic_t cifs_tmpcounter;
129 
130 /*
131  * Write-only module parameter to drop all cached directory entries across
132  * all CIFS mounts. Echo a non-zero value to trigger.
133  */
cifs_drop_all_dir_caches(void)134 static void cifs_drop_all_dir_caches(void)
135 {
136 	struct TCP_Server_Info *server;
137 	struct cifs_ses *ses;
138 	struct cifs_tcon *tcon;
139 
140 	spin_lock(&cifs_tcp_ses_lock);
141 	list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
142 		list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
143 			if (cifs_ses_exiting(ses))
144 				continue;
145 			list_for_each_entry(tcon, &ses->tcon_list, tcon_list)
146 				invalidate_all_cached_dirs(tcon);
147 		}
148 	}
149 	spin_unlock(&cifs_tcp_ses_lock);
150 }
151 
cifs_param_set_drop_dir_cache(const char * val,const struct kernel_param * kp)152 static int cifs_param_set_drop_dir_cache(const char *val, const struct kernel_param *kp)
153 {
154 	bool bv;
155 	int rc = kstrtobool(val, &bv);
156 
157 	if (rc)
158 		return rc;
159 	if (bv)
160 		cifs_drop_all_dir_caches();
161 	return 0;
162 }
163 
164 module_param_call(drop_dir_cache, cifs_param_set_drop_dir_cache, NULL, NULL, 0200);
165 MODULE_PARM_DESC(drop_dir_cache, "Write 1 to drop all cached directory entries across all CIFS mounts");
166 
#ifdef CONFIG_CIFS_STATS2
unsigned int slow_rsp_threshold = 1;
module_param(slow_rsp_threshold, uint, 0644);
MODULE_PARM_DESC(slow_rsp_threshold, "Amount of time (in seconds) to wait "
				   "before logging that a response is delayed. "
				   "Default: 1 (if set to 0 disables msg).");
#endif /* STATS2 */

module_param(enable_oplocks, bool, 0644);
MODULE_PARM_DESC(enable_oplocks, "Enable or disable oplocks. Default: y/Y/1");

module_param(enable_gcm_256, bool, 0644);
MODULE_PARM_DESC(enable_gcm_256, "Enable requesting strongest (256 bit) GCM encryption. Default: y/Y/1");

module_param(require_gcm_256, bool, 0644);
MODULE_PARM_DESC(require_gcm_256, "Require strongest (256 bit) GCM encryption. Default: n/N/0");

module_param(enable_negotiate_signing, bool, 0644);
MODULE_PARM_DESC(enable_negotiate_signing, "Enable negotiating packet signing algorithm with server. Default: n/N/0");

module_param(disable_legacy_dialects, bool, 0644);
MODULE_PARM_DESC(disable_legacy_dialects, "To improve security it may be "
				  "helpful to restrict the ability to "
				  "override the default dialects (SMB2.1, "
				  "SMB3 and SMB3.02) on mount with old "
				  "dialects (CIFS/SMB1 and SMB2) since "
				  "vers=1.0 (CIFS/SMB1) and vers=2.0 are weaker"
				  " and less secure. Default: n/N/0");

/* Dedicated workqueues; allocated at module init (outside this chunk) */
struct workqueue_struct	*cifsiod_wq;
struct workqueue_struct	*decrypt_wq;
struct workqueue_struct	*fileinfo_put_wq;
struct workqueue_struct	*cifsoplockd_wq;
struct workqueue_struct	*deferredclose_wq;
struct workqueue_struct	*serverclose_wq;
struct workqueue_struct	*cfid_put_wq;
/* NOTE(review): initialized elsewhere; appears to be a per-boot secret used when hashing byte-range lock owners — confirm at the point of use */
__u32 cifs_lock_secret;
204 
205 /*
206  * Bumps refcount for cifs super block.
207  * Note that it should be only called if a reference to VFS super block is
208  * already held, e.g. in open-type syscalls context. Otherwise it can race with
209  * atomic_dec_and_test in deactivate_locked_super.
210  */
211 void
cifs_sb_active(struct super_block * sb)212 cifs_sb_active(struct super_block *sb)
213 {
214 	struct cifs_sb_info *server = CIFS_SB(sb);
215 
216 	if (atomic_inc_return(&server->active) == 1)
217 		atomic_inc(&sb->s_active);
218 }
219 
220 void
cifs_sb_deactive(struct super_block * sb)221 cifs_sb_deactive(struct super_block *sb)
222 {
223 	struct cifs_sb_info *server = CIFS_SB(sb);
224 
225 	if (atomic_dec_and_test(&server->active))
226 		deactivate_super(sb);
227 }
228 
/*
 * Initialize the VFS superblock after the master tcon has been established:
 * sets mount flags, file-size and timestamp limits, bdi/readahead tuning,
 * dentry operations and the root inode/dentry.  Returns 0 or -errno.
 */
static int
cifs_read_super(struct super_block *sb)
{
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	unsigned int sbflags;
	struct timespec64 ts;
	struct inode *inode;
	int rc = 0;

	cifs_sb = CIFS_SB(sb);
	tcon = cifs_sb_master_tcon(cifs_sb);
	sbflags = cifs_sb_flags(cifs_sb);

	if (sbflags & CIFS_MOUNT_POSIXACL)
		sb->s_flags |= SB_POSIXACL;

	/* mounts of a point-in-time snapshot are forced read-only */
	if (tcon->snapshot_time)
		sb->s_flags |= SB_RDONLY;

	if (tcon->ses->capabilities & tcon->ses->server->vals->cap_large_files)
		sb->s_maxbytes = MAX_LFS_FILESIZE;
	else
		sb->s_maxbytes = MAX_NON_LFS;

	/*
	 * Some very old servers like DOS and OS/2 used 2 second granularity
	 * (while all current servers use 100ns granularity - see MS-DTYP)
	 * but 1 second is the maximum allowed granularity for the VFS
	 * so for old servers set time granularity to 1 second while for
	 * everything else (current servers) set it to 100ns.
	 */
	if ((tcon->ses->server->vals->protocol_id == SMB10_PROT_ID) &&
	    ((tcon->ses->capabilities &
	      tcon->ses->server->vals->cap_nt_find) == 0) &&
	    !tcon->unix_ext) {
		sb->s_time_gran = 1000000000; /* 1 second is max allowed gran */
		ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MIN), 0, 0);
		sb->s_time_min = ts.tv_sec;
		ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MAX),
				    cpu_to_le16(SMB_TIME_MAX), 0);
		sb->s_time_max = ts.tv_sec;
	} else {
		/*
		 * Almost every server, including all SMB2+, uses DCE TIME
		 * ie 100 nanosecond units, since 1601.  See MS-DTYP and MS-FSCC
		 */
		sb->s_time_gran = 100;
		ts = cifs_NTtimeToUnix(0);
		sb->s_time_min = ts.tv_sec;
		ts = cifs_NTtimeToUnix(cpu_to_le64(S64_MAX));
		sb->s_time_max = ts.tv_sec;
	}

	sb->s_magic = CIFS_SUPER_MAGIC;
	sb->s_op = &cifs_super_ops;
	sb->s_xattr = cifs_xattr_handlers;
	rc = super_setup_bdi(sb);
	if (rc)
		goto out_no_root;
	/* tune readahead according to rsize if readahead size not set on mount */
	if (cifs_sb->ctx->rsize == 0)
		cifs_sb->ctx->rsize =
			tcon->ses->server->ops->negotiate_rsize(tcon, cifs_sb->ctx);
	if (cifs_sb->ctx->rasize)
		sb->s_bdi->ra_pages = cifs_sb->ctx->rasize / PAGE_SIZE;
	else
		sb->s_bdi->ra_pages = 2 * (cifs_sb->ctx->rsize / PAGE_SIZE);

	sb->s_blocksize = CIFS_MAX_MSGSIZE;
	sb->s_blocksize_bits = 14;	/* default 2**14 = CIFS_MAX_MSGSIZE */
	inode = cifs_root_iget(sb);

	if (IS_ERR(inode)) {
		rc = PTR_ERR(inode);
		goto out_no_root;
	}

	/* case-insensitive shares need the CI dentry hash/compare ops */
	if (tcon->nocase)
		set_default_d_op(sb, &cifs_ci_dentry_ops);
	else
		set_default_d_op(sb, &cifs_dentry_ops);

	sb->s_root = d_make_root(inode);
	if (!sb->s_root) {
		rc = -ENOMEM;
		goto out_no_root;
	}

#ifdef CONFIG_CIFS_NFSD_EXPORT
	/* NFS re-export requires stable server inode numbers */
	if (sbflags & CIFS_MOUNT_SERVER_INUM) {
		cifs_dbg(FYI, "export ops supported\n");
		sb->s_export_op = &cifs_export_ops;
	}
#endif /* CONFIG_CIFS_NFSD_EXPORT */

	return 0;

out_no_root:
	cifs_dbg(VFS, "%s: get root inode failed\n", __func__);
	return rc;
}
331 
/*
 * Tear down the superblock on unmount.  Cached directory dentries and
 * deferred file handles must be released before kill_anon_super(), or
 * the dentry refcounts would keep the sb busy.
 */
static void cifs_kill_sb(struct super_block *sb)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);

	/*
	 * We need to release all dentries for the cached directories
	 * and close all deferred file handles before we kill the sb.
	 */
	if (cifs_sb->root) {
		close_all_cached_dirs(cifs_sb);
		cifs_close_all_deferred_files_sb(cifs_sb);

		/* Wait for all pending oplock breaks to complete */
		flush_workqueue(cifsoplockd_wq);

		/* finally release root dentry */
		dput(cifs_sb->root);
		cifs_sb->root = NULL;
	}

	kill_anon_super(sb);
	cifs_umount(cifs_sb);
}
355 
/*
 * ->statfs: fill in *buf with filesystem statistics.  Static limits
 * (name length, fsid) come from cached tcon info; block/space counts are
 * delegated to the dialect-specific queryfs op when available.
 */
static int
cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
	struct TCP_Server_Info *server = tcon->ses->server;
	unsigned int xid;
	int rc = 0;
	const char *full_path;
	void *page;

	xid = get_xid();
	page = alloc_dentry_path();

	full_path = build_path_from_dentry(dentry, page);
	if (IS_ERR(full_path)) {
		rc = PTR_ERR(full_path);
		goto statfs_out;
	}

	/* prefer the server-reported limit; fall back to PATH_MAX */
	if (le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength) > 0)
		buf->f_namelen =
		       le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength);
	else
		buf->f_namelen = PATH_MAX;

	buf->f_fsid.val[0] = tcon->vol_serial_number;
	/* are using part of create time for more randomness, see man statfs */
	buf->f_fsid.val[1] =  (int)le64_to_cpu(tcon->vol_create_time);

	buf->f_files = 0;	/* undefined */
	buf->f_ffree = 0;	/* unlimited */

	if (server->ops->queryfs)
		rc = server->ops->queryfs(xid, tcon, full_path, cifs_sb, buf);

statfs_out:
	free_dentry_path(page);
	free_xid(xid);
	return rc;
}
398 
/*
 * ->fallocate: delegate to the dialect-specific fallocate op.  Takes the
 * inode lock (killably) and waits for outstanding netfs I/O so the server
 * call does not race with in-flight reads/writes.
 */
static long cifs_fallocate(struct file *file, int mode, loff_t off, loff_t len)
{
	struct cifs_tcon *tcon = cifs_sb_master_tcon(CIFS_SB(file));
	struct TCP_Server_Info *server = tcon->ses->server;
	struct inode *inode = file_inode(file);
	int rc;

	if (!server->ops->fallocate)
		return -EOPNOTSUPP;

	rc = inode_lock_killable(inode);
	if (rc)
		return rc;

	netfs_wait_for_outstanding_io(inode);

	/* update mtime/ctime and strip setuid bits before changing the file */
	rc = file_modified(file);
	if (rc)
		goto out_unlock;

	rc = server->ops->fallocate(file, tcon, mode, off, len);

out_unlock:
	inode_unlock(inode);
	return rc;
}
425 
cifs_permission(struct mnt_idmap * idmap,struct inode * inode,int mask)426 static int cifs_permission(struct mnt_idmap *idmap,
427 			   struct inode *inode, int mask)
428 {
429 	unsigned int sbflags = cifs_sb_flags(CIFS_SB(inode));
430 
431 	if (sbflags & CIFS_MOUNT_NO_PERM) {
432 		if ((mask & MAY_EXEC) && !execute_ok(inode))
433 			return -EACCES;
434 		else
435 			return 0;
436 	} else /* file mode might have been restricted at mount time
437 		on the client (above and beyond ACL on servers) for
438 		servers which do not support setting and viewing mode bits,
439 		so allowing client to check permissions is useful */
440 		return generic_permission(&nop_mnt_idmap, inode, mask);
441 }
442 
/* Slab caches and mempools for inodes, request buffers and mids;
 * created at module init (outside this chunk). */
static struct kmem_cache *cifs_inode_cachep;
static struct kmem_cache *cifs_req_cachep;
static struct kmem_cache *cifs_mid_cachep;
static struct kmem_cache *cifs_sm_req_cachep;
static struct kmem_cache *cifs_io_request_cachep;
static struct kmem_cache *cifs_io_subrequest_cachep;
mempool_t *cifs_sm_req_poolp;
mempool_t *cifs_req_poolp;
mempool_t cifs_mid_pool;
mempool_t cifs_io_request_pool;
mempool_t cifs_io_subrequest_pool;
454 
/*
 * ->alloc_inode: allocate and initialize a cifsInodeInfo from the slab
 * cache.  Returns the embedded VFS inode, or NULL on allocation failure.
 */
static struct inode *
cifs_alloc_inode(struct super_block *sb)
{
	struct cifsInodeInfo *cifs_inode;
	cifs_inode = alloc_inode_sb(sb, cifs_inode_cachep, GFP_KERNEL);
	if (!cifs_inode)
		return NULL;
	cifs_inode->cifsAttrs = ATTR_ARCHIVE;	/* default */
	cifs_inode->time = 0;
	/*
	 * Until the file is open and we have gotten oplock info back from the
	 * server, can not assume caching of file data or metadata.
	 */
	cifs_set_oplock_level(cifs_inode, 0);
	cifs_inode->lease_granted = false;
	cifs_inode->flags = 0;
	spin_lock_init(&cifs_inode->writers_lock);
	cifs_inode->writers = 0;
	cifs_inode->netfs.inode.i_blkbits = 14;  /* 2**14 = CIFS_MAX_MSGSIZE */
	cifs_inode->netfs.remote_i_size = 0;
	cifs_inode->uniqueid = 0;
	cifs_inode->createtime = 0;
	cifs_inode->epoch = 0;
	spin_lock_init(&cifs_inode->open_file_lock);
	/* each inode gets its own lease key for SMB2+ leases */
	generate_random_uuid(cifs_inode->lease_key);
	cifs_inode->symlink_target = NULL;

	/*
	 * Can not set i_flags here - they get immediately overwritten to zero
	 * by the VFS.
	 */
	/* cifs_inode->netfs.inode.i_flags = S_NOATIME | S_NOCMTIME; */
	INIT_LIST_HEAD(&cifs_inode->openFileList);
	INIT_LIST_HEAD(&cifs_inode->llist);
	INIT_LIST_HEAD(&cifs_inode->deferred_closes);
	spin_lock_init(&cifs_inode->deferred_lock);
	return &cifs_inode->netfs.inode;
}
493 
494 static void
cifs_free_inode(struct inode * inode)495 cifs_free_inode(struct inode *inode)
496 {
497 	struct cifsInodeInfo *cinode = CIFS_I(inode);
498 
499 	if (S_ISLNK(inode->i_mode))
500 		kfree(cinode->symlink_target);
501 	kmem_cache_free(cifs_inode_cachep, cinode);
502 }
503 
/*
 * ->evict_inode: final teardown of an inode.  Ordering matters: wait for
 * in-flight netfs I/O first, then truncate the pagecache, then drop the
 * fscache cookie, and finally clear the inode.
 */
static void
cifs_evict_inode(struct inode *inode)
{
	netfs_wait_for_outstanding_io(inode);
	truncate_inode_pages_final(&inode->i_data);
	/* release the cache pin if this inode was pinning netfs writeback */
	if (inode_state_read_once(inode) & I_PINNING_NETFS_WB)
		cifs_fscache_unuse_inode_cookie(inode, true);
	cifs_fscache_release_inode_cookie(inode);
	clear_inode(inode);
}
514 
515 static void
cifs_show_address(struct seq_file * s,struct TCP_Server_Info * server)516 cifs_show_address(struct seq_file *s, struct TCP_Server_Info *server)
517 {
518 	struct sockaddr_in *sa = (struct sockaddr_in *) &server->dstaddr;
519 	struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *) &server->dstaddr;
520 
521 	seq_puts(s, ",addr=");
522 
523 	switch (server->dstaddr.ss_family) {
524 	case AF_INET:
525 		seq_printf(s, "%pI4", &sa->sin_addr.s_addr);
526 		break;
527 	case AF_INET6:
528 		seq_printf(s, "%pI6", &sa6->sin6_addr.s6_addr);
529 		if (sa6->sin6_scope_id)
530 			seq_printf(s, "%%%u", sa6->sin6_scope_id);
531 		break;
532 	default:
533 		seq_puts(s, "(unknown)");
534 	}
535 	if (server->rdma)
536 		seq_puts(s, ",rdma");
537 }
538 
539 static void
cifs_show_security(struct seq_file * s,struct cifs_ses * ses)540 cifs_show_security(struct seq_file *s, struct cifs_ses *ses)
541 {
542 	if (ses->sectype == Unspecified) {
543 		if (ses->user_name == NULL)
544 			seq_puts(s, ",sec=none");
545 		return;
546 	}
547 
548 	seq_puts(s, ",sec=");
549 
550 	switch (ses->sectype) {
551 	case NTLMv2:
552 		seq_puts(s, "ntlmv2");
553 		break;
554 	case Kerberos:
555 		seq_puts(s, "krb5");
556 		break;
557 	case RawNTLMSSP:
558 		seq_puts(s, "ntlmssp");
559 		break;
560 	default:
561 		/* shouldn't ever happen */
562 		seq_puts(s, "unknown");
563 		break;
564 	}
565 
566 	if (ses->sign)
567 		seq_puts(s, "i");
568 
569 	if (ses->sectype == Kerberos)
570 		seq_printf(s, ",cruid=%u",
571 			   from_kuid_munged(&init_user_ns, ses->cred_uid));
572 }
573 
574 static void
cifs_show_cache_flavor(struct seq_file * s,struct cifs_sb_info * cifs_sb)575 cifs_show_cache_flavor(struct seq_file *s, struct cifs_sb_info *cifs_sb)
576 {
577 	unsigned int sbflags = cifs_sb_flags(cifs_sb);
578 
579 	seq_puts(s, ",cache=");
580 
581 	if (sbflags & CIFS_MOUNT_STRICT_IO)
582 		seq_puts(s, "strict");
583 	else if (sbflags & CIFS_MOUNT_DIRECT_IO)
584 		seq_puts(s, "none");
585 	else if (sbflags & CIFS_MOUNT_RW_CACHE)
586 		seq_puts(s, "singleclient"); /* assume only one client access */
587 	else if (sbflags & CIFS_MOUNT_RO_CACHE)
588 		seq_puts(s, "ro"); /* read only caching assumed */
589 	else
590 		seq_puts(s, "loose");
591 }
592 
593 /*
594  * cifs_show_devname() is used so we show the mount device name with correct
595  * format (e.g. forward slashes vs. back slashes) in /proc/mounts
596  */
cifs_show_devname(struct seq_file * m,struct dentry * root)597 static int cifs_show_devname(struct seq_file *m, struct dentry *root)
598 {
599 	struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
600 	char *devname = kstrdup(cifs_sb->ctx->source, GFP_KERNEL);
601 
602 	if (devname == NULL)
603 		seq_puts(m, "none");
604 	else {
605 		convert_delimiter(devname, '/');
606 		/* escape all spaces in share names */
607 		seq_escape(m, devname, " \t");
608 		kfree(devname);
609 	}
610 	return 0;
611 }
612 
613 static void
cifs_show_upcall_target(struct seq_file * s,struct cifs_sb_info * cifs_sb)614 cifs_show_upcall_target(struct seq_file *s, struct cifs_sb_info *cifs_sb)
615 {
616 	if (cifs_sb->ctx->upcall_target == UPTARGET_UNSPECIFIED) {
617 		seq_puts(s, ",upcall_target=app");
618 		return;
619 	}
620 
621 	seq_puts(s, ",upcall_target=");
622 
623 	switch (cifs_sb->ctx->upcall_target) {
624 	case UPTARGET_APP:
625 		seq_puts(s, "app");
626 		break;
627 	case UPTARGET_MOUNT:
628 		seq_puts(s, "mount");
629 		break;
630 	default:
631 		/* shouldn't ever happen */
632 		seq_puts(s, "unknown");
633 		break;
634 	}
635 }
636 
637 /*
638  * cifs_show_options() is for displaying mount options in /proc/mounts.
639  * Not all settable options are displayed but most of the important
640  * ones are.
641  */
/*
 * ->show_options: print the active mount options to /proc/mounts.
 * The ordering and exact option strings below are user-visible; tools
 * parse this output, so keep it stable.
 */
static int
cifs_show_options(struct seq_file *s, struct dentry *root)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
	struct sockaddr *srcaddr;
	unsigned int sbflags;

	srcaddr = (struct sockaddr *)&tcon->ses->server->srcaddr;

	seq_show_option(s, "vers", tcon->ses->server->vals->version_string);
	cifs_show_security(s, tcon->ses);
	cifs_show_cache_flavor(s, cifs_sb);
	cifs_show_upcall_target(s, cifs_sb);

	if (tcon->no_lease)
		seq_puts(s, ",nolease");
	if (cifs_sb->ctx->multiuser)
		seq_puts(s, ",multiuser");
	else if (tcon->ses->user_name)
		seq_show_option(s, "username", tcon->ses->user_name);

	if (tcon->ses->domainName && tcon->ses->domainName[0] != 0)
		seq_show_option(s, "domain", tcon->ses->domainName);

	/* only show srcaddr when one was explicitly bound */
	if (srcaddr->sa_family != AF_UNSPEC) {
		struct sockaddr_in *saddr4;
		struct sockaddr_in6 *saddr6;
		saddr4 = (struct sockaddr_in *)srcaddr;
		saddr6 = (struct sockaddr_in6 *)srcaddr;
		if (srcaddr->sa_family == AF_INET6)
			seq_printf(s, ",srcaddr=%pI6c",
				   &saddr6->sin6_addr);
		else if (srcaddr->sa_family == AF_INET)
			seq_printf(s, ",srcaddr=%pI4",
				   &saddr4->sin_addr.s_addr);
		else
			seq_printf(s, ",srcaddr=BAD-AF:%i",
				   (int)(srcaddr->sa_family));
	}

	sbflags = cifs_sb_flags(cifs_sb);
	seq_printf(s, ",uid=%u",
		   from_kuid_munged(&init_user_ns, cifs_sb->ctx->linux_uid));
	if (sbflags & CIFS_MOUNT_OVERR_UID)
		seq_puts(s, ",forceuid");
	else
		seq_puts(s, ",noforceuid");

	seq_printf(s, ",gid=%u",
		   from_kgid_munged(&init_user_ns, cifs_sb->ctx->linux_gid));
	if (sbflags & CIFS_MOUNT_OVERR_GID)
		seq_puts(s, ",forcegid");
	else
		seq_puts(s, ",noforcegid");

	cifs_show_address(s, tcon->ses->server);

	if (!tcon->unix_ext)
		seq_printf(s, ",file_mode=0%ho,dir_mode=0%ho",
					   cifs_sb->ctx->file_mode,
					   cifs_sb->ctx->dir_mode);
	if (cifs_sb->ctx->iocharset)
		seq_printf(s, ",iocharset=%s", cifs_sb->ctx->iocharset);
	if (tcon->ses->unicode == 0)
		seq_puts(s, ",nounicode");
	else if (tcon->ses->unicode == 1)
		seq_puts(s, ",unicode");
	if (tcon->seal)
		seq_puts(s, ",seal");
	else if (tcon->ses->server->ignore_signature)
		seq_puts(s, ",signloosely");
	if (tcon->nocase)
		seq_puts(s, ",nocase");
	if (tcon->nodelete)
		seq_puts(s, ",nodelete");
	if (cifs_sb->ctx->no_sparse)
		seq_puts(s, ",nosparse");
	if (tcon->local_lease)
		seq_puts(s, ",locallease");
	if (tcon->retry)
		seq_puts(s, ",hard");
	else
		seq_puts(s, ",soft");
	if (tcon->use_persistent)
		seq_puts(s, ",persistenthandles");
	else if (tcon->use_resilient)
		seq_puts(s, ",resilienthandles");
	if (tcon->posix_extensions)
		seq_puts(s, ",posix");
	else if (tcon->unix_ext)
		seq_puts(s, ",unix");
	else
		seq_puts(s, ",nounix");
	if (sbflags & CIFS_MOUNT_NO_DFS)
		seq_puts(s, ",nodfs");
	if (sbflags & CIFS_MOUNT_POSIX_PATHS)
		seq_puts(s, ",posixpaths");
	if (sbflags & CIFS_MOUNT_SET_UID)
		seq_puts(s, ",setuids");
	if (sbflags & CIFS_MOUNT_UID_FROM_ACL)
		seq_puts(s, ",idsfromsid");
	if (sbflags & CIFS_MOUNT_SERVER_INUM)
		seq_puts(s, ",serverino");
	if (sbflags & CIFS_MOUNT_RWPIDFORWARD)
		seq_puts(s, ",rwpidforward");
	if (sbflags & CIFS_MOUNT_NOPOSIXBRL)
		seq_puts(s, ",forcemand");
	if (sbflags & CIFS_MOUNT_NO_XATTR)
		seq_puts(s, ",nouser_xattr");
	if (sbflags & CIFS_MOUNT_MAP_SPECIAL_CHR)
		seq_puts(s, ",mapchars");
	if (sbflags & CIFS_MOUNT_MAP_SFM_CHR)
		seq_puts(s, ",mapposix");
	if (sbflags & CIFS_MOUNT_UNX_EMUL)
		seq_puts(s, ",sfu");
	if (sbflags & CIFS_MOUNT_NO_BRL)
		seq_puts(s, ",nobrl");
	if (sbflags & CIFS_MOUNT_NO_HANDLE_CACHE)
		seq_puts(s, ",nohandlecache");
	if (sbflags & CIFS_MOUNT_MODE_FROM_SID)
		seq_puts(s, ",modefromsid");
	if (sbflags & CIFS_MOUNT_CIFS_ACL)
		seq_puts(s, ",cifsacl");
	if (sbflags & CIFS_MOUNT_DYNPERM)
		seq_puts(s, ",dynperm");
	if (root->d_sb->s_flags & SB_POSIXACL)
		seq_puts(s, ",acl");
	if (sbflags & CIFS_MOUNT_MF_SYMLINKS)
		seq_puts(s, ",mfsymlinks");
	if (sbflags & CIFS_MOUNT_FSCACHE)
		seq_puts(s, ",fsc");
	if (sbflags & CIFS_MOUNT_NOSSYNC)
		seq_puts(s, ",nostrictsync");
	if (sbflags & CIFS_MOUNT_NO_PERM)
		seq_puts(s, ",noperm");
	if (sbflags & CIFS_MOUNT_CIFS_BACKUPUID)
		seq_printf(s, ",backupuid=%u",
			   from_kuid_munged(&init_user_ns,
					    cifs_sb->ctx->backupuid));
	if (sbflags & CIFS_MOUNT_CIFS_BACKUPGID)
		seq_printf(s, ",backupgid=%u",
			   from_kgid_munged(&init_user_ns,
					    cifs_sb->ctx->backupgid));
	seq_show_option(s, "reparse",
			cifs_reparse_type_str(cifs_sb->ctx->reparse_type));
	if (cifs_sb->ctx->nonativesocket)
		seq_puts(s, ",nonativesocket");
	else
		seq_puts(s, ",nativesocket");
	seq_show_option(s, "symlink",
			cifs_symlink_type_str(cifs_symlink_type(cifs_sb)));

	seq_printf(s, ",rsize=%u", cifs_sb->ctx->rsize);
	seq_printf(s, ",wsize=%u", cifs_sb->ctx->wsize);
	seq_printf(s, ",bsize=%u", cifs_sb->ctx->bsize);
	if (cifs_sb->ctx->rasize)
		seq_printf(s, ",rasize=%u", cifs_sb->ctx->rasize);
	if (tcon->ses->server->min_offload)
		seq_printf(s, ",esize=%u", tcon->ses->server->min_offload);
	if (tcon->ses->server->retrans)
		seq_printf(s, ",retrans=%u", tcon->ses->server->retrans);
	seq_printf(s, ",echo_interval=%lu",
			tcon->ses->server->echo_interval / HZ);

	/* Only display the following if overridden on mount */
	if (tcon->ses->server->max_credits != SMB2_MAX_CREDITS_AVAILABLE)
		seq_printf(s, ",max_credits=%u", tcon->ses->server->max_credits);
	if (tcon->ses->server->tcp_nodelay)
		seq_puts(s, ",tcpnodelay");
	if (tcon->ses->server->noautotune)
		seq_puts(s, ",noautotune");
	if (tcon->ses->server->noblocksnd)
		seq_puts(s, ",noblocksend");
	if (tcon->ses->server->nosharesock)
		seq_puts(s, ",nosharesock");

	if (tcon->snapshot_time)
		seq_printf(s, ",snapshot=%llu", tcon->snapshot_time);
	if (tcon->handle_timeout)
		seq_printf(s, ",handletimeout=%u", tcon->handle_timeout);
	if (tcon->max_cached_dirs != MAX_CACHED_FIDS)
		seq_printf(s, ",max_cached_dirs=%u", tcon->max_cached_dirs);

	/*
	 * Display file and directory attribute timeout in seconds.
	 * If file and directory attribute timeout the same then actimeo
	 * was likely specified on mount
	 */
	if (cifs_sb->ctx->acdirmax == cifs_sb->ctx->acregmax)
		seq_printf(s, ",actimeo=%lu", cifs_sb->ctx->acregmax / HZ);
	else {
		seq_printf(s, ",acdirmax=%lu", cifs_sb->ctx->acdirmax / HZ);
		seq_printf(s, ",acregmax=%lu", cifs_sb->ctx->acregmax / HZ);
	}
	seq_printf(s, ",closetimeo=%lu", cifs_sb->ctx->closetimeo / HZ);

	if (tcon->ses->chan_max > 1)
		seq_printf(s, ",multichannel,max_channels=%zu",
			   tcon->ses->chan_max);

	if (tcon->use_witness)
		seq_puts(s, ",witness");

	return 0;
}
848 
/*
 * ->umount_begin (umount -f): wake up any tasks blocked on this tcon's
 * server queues so a forced unmount can make progress.  Does nothing if
 * the share is mounted elsewhere or teardown has already started.
 */
static void cifs_umount_begin(struct super_block *sb)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_tcon *tcon;

	if (cifs_sb == NULL)
		return;

	tcon = cifs_sb_master_tcon(cifs_sb);

	/* lock order: cifs_tcp_ses_lock before tc_lock */
	spin_lock(&cifs_tcp_ses_lock);
	spin_lock(&tcon->tc_lock);
	trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count,
			    netfs_trace_tcon_ref_see_umount);
	if ((tcon->tc_count > 1) || (tcon->status == TID_EXITING)) {
		/* we have other mounts to same share or we have
		   already tried to umount this and woken up
		   all waiting network requests, nothing to do */
		spin_unlock(&tcon->tc_lock);
		spin_unlock(&cifs_tcp_ses_lock);
		return;
	}
	/*
	 * can not set tcon->status to TID_EXITING yet since we don't know if umount -f will
	 * fail later (e.g. due to open files).  TID_EXITING will be set just before tdis req sent
	 */
	spin_unlock(&tcon->tc_lock);
	spin_unlock(&cifs_tcp_ses_lock);

	/* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
	/* cancel_notify_requests(tcon); */
	if (tcon->ses && tcon->ses->server) {
		cifs_dbg(FYI, "wake up tasks now - umount begin not complete\n");
		wake_up_all(&tcon->ses->server->request_q);
		wake_up_all(&tcon->ses->server->response_q);
		msleep(1); /* yield */
		/* we have to kick the requests once more */
		wake_up_all(&tcon->ses->server->response_q);
		msleep(1);
	}

	return;
}
892 
/*
 * ->freeze_fs: flush deferred (lazily-closed) file handles on the master
 * tree connection before the filesystem is frozen.  Always returns 0.
 */
static int cifs_freeze(struct super_block *sb)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);

	if (cifs_sb)
		cifs_close_all_deferred_files(cifs_sb_master_tcon(cifs_sb));

	return 0;
}
906 
#ifdef CONFIG_CIFS_STATS2
/* ->show_stats stub: per-mount statistics display is not implemented yet */
static int cifs_show_stats(struct seq_file *s, struct dentry *root)
{
	/* BB FIXME */
	return 0;
}
#endif
914 
/* ->write_inode: nothing of our own to write back; just unpin netfs writeback */
static int cifs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	return netfs_unpin_writeback(inode, wbc);
}
919 
cifs_drop_inode(struct inode * inode)920 static int cifs_drop_inode(struct inode *inode)
921 {
922 	unsigned int sbflags = cifs_sb_flags(CIFS_SB(inode));
923 
924 	/* no serverino => unconditional eviction */
925 	return !(sbflags & CIFS_MOUNT_SERVER_INUM) ||
926 		inode_generic_drop(inode);
927 }
928 
/* Superblock operations table installed by cifs_read_super() */
static const struct super_operations cifs_super_ops = {
	.statfs = cifs_statfs,
	.alloc_inode = cifs_alloc_inode,
	.write_inode	= cifs_write_inode,
	.free_inode = cifs_free_inode,
	.drop_inode	= cifs_drop_inode,
	.evict_inode	= cifs_evict_inode,
/*	.show_path	= cifs_show_path, */ /* Would we ever need show path? */
	.show_devname   = cifs_show_devname,
/*	.delete_inode	= cifs_delete_inode,  */  /* Do not need above
	function unless later we add lazy close of inodes or unless the
	kernel forgets to call us with the same number of releases (closes)
	as opens */
	.show_options = cifs_show_options,
	.umount_begin   = cifs_umount_begin,
	.freeze_fs      = cifs_freeze,
#ifdef CONFIG_CIFS_STATS2
	.show_stats = cifs_show_stats,
#endif
};
949 
/*
 * Get root dentry from superblock according to prefix path mount option.
 * Return dentry with refcount + 1 on success and NULL otherwise.
 */
static struct dentry *
cifs_get_root(struct smb3_fs_context *ctx, struct super_block *sb)
{
	struct dentry *dentry;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	char *full_path = NULL;
	char *s, *p;
	char sep;

	/* With a prefix path, s_root already points at the prefixed tree */
	if (cifs_sb_flags(cifs_sb) & CIFS_MOUNT_USE_PREFIX_PATH)
		return dget(sb->s_root);

	full_path = cifs_build_path_to_root(ctx, cifs_sb,
				cifs_sb_master_tcon(cifs_sb), 0);
	if (full_path == NULL)
		return ERR_PTR(-ENOMEM);

	cifs_dbg(FYI, "Get root dentry for %s\n", full_path);

	sep = CIFS_DIR_SEP(cifs_sb);
	dentry = dget(sb->s_root);
	s = full_path;

	/* Walk the path one component at a time, starting at s_root */
	do {
		struct inode *dir = d_inode(dentry);
		struct dentry *child;

		if (!S_ISDIR(dir->i_mode)) {
			dput(dentry);
			dentry = ERR_PTR(-ENOTDIR);
			break;
		}

		/* skip separators */
		while (*s == sep)
			s++;
		if (!*s)
			break;
		p = s++;
		/* next separator */
		while (*s && *s != sep)
			s++;

		/* look up the component [p, s) under the current dentry */
		child = lookup_noperm_positive_unlocked(&QSTR_LEN(p, s - p),
							dentry);
		/* drop the parent ref; the child (or error) replaces it */
		dput(dentry);
		dentry = child;
	} while (!IS_ERR(dentry));
	kfree(full_path);
	return dentry;
}
1005 
/* sget() "set" callback: attach the prebuilt cifs_sb to a new superblock */
static int cifs_set_super(struct super_block *sb, void *data)
{
	struct cifs_mnt_data *mnt_data = data;
	sb->s_fs_info = mnt_data->cifs_sb;
	return set_anon_super(sb, NULL);
}
1012 
/*
 * Mount entry point shared by the "cifs" and "smb3" filesystem types.
 * Builds a cifs_sb from a private copy of the parsed fs context, connects
 * to the server, then finds or creates a matching superblock and returns
 * its (possibly prefix-path-adjusted) root dentry or an ERR_PTR.
 */
struct dentry *
cifs_smb3_do_mount(struct file_system_type *fs_type,
	      int flags, struct smb3_fs_context *old_ctx)
{
	struct cifs_mnt_data mnt_data;
	struct cifs_sb_info *cifs_sb;
	struct super_block *sb;
	struct dentry *root;
	int rc;

	if (cifsFYI) {
		cifs_dbg(FYI, "%s: devname=%s flags=0x%x\n", __func__,
			 old_ctx->source, flags);
	} else {
		cifs_info("Attempting to mount %s\n", old_ctx->source);
	}
	cifs_sb = kzalloc_obj(*cifs_sb);
	if (!cifs_sb)
		return ERR_PTR(-ENOMEM);

	cifs_sb->ctx = kzalloc_obj(struct smb3_fs_context);
	if (!cifs_sb->ctx) {
		root = ERR_PTR(-ENOMEM);
		goto out;
	}
	/* Private copy of the context; old_ctx stays owned by the caller */
	rc = smb3_fs_context_dup(cifs_sb->ctx, old_ctx);
	if (rc) {
		root = ERR_PTR(rc);
		goto out;
	}

	rc = cifs_setup_cifs_sb(cifs_sb);
	if (rc) {
		root = ERR_PTR(rc);
		goto out;
	}

	/* Establish the connection(s) for this mount */
	rc = cifs_mount(cifs_sb, cifs_sb->ctx);
	if (rc) {
		if (!(flags & SB_SILENT))
			cifs_dbg(VFS, "cifs_mount failed w/return code = %d\n",
				 rc);
		root = ERR_PTR(rc);
		goto out;
	}

	mnt_data.ctx = cifs_sb->ctx;
	mnt_data.cifs_sb = cifs_sb;
	mnt_data.flags = flags;

	/* BB should we make this contingent on mount parm? */
	flags |= SB_NODIRATIME | SB_NOATIME;

	sb = sget(fs_type, cifs_match_super, cifs_set_super, flags, &mnt_data);
	if (IS_ERR(sb)) {
		cifs_umount(cifs_sb);
		return ERR_CAST(sb);
	}

	if (sb->s_root) {
		/* Matched an existing superblock: drop our duplicate state */
		cifs_dbg(FYI, "Use existing superblock\n");
		cifs_umount(cifs_sb);
		cifs_sb = NULL;
	} else {
		rc = cifs_read_super(sb);
		if (rc) {
			root = ERR_PTR(rc);
			goto out_super;
		}

		sb->s_flags |= SB_ACTIVE;
	}

	/* Descend to the prefix-path root if one was requested */
	root = cifs_get_root(cifs_sb ? cifs_sb->ctx : old_ctx, sb);
	if (IS_ERR(root))
		goto out_super;

	if (cifs_sb)
		cifs_sb->root = dget(root);

	cifs_dbg(FYI, "dentry root is: %p\n", root);
	return root;

out_super:
	/* ->kill_sb tears down the cifs_sb now attached to sb */
	deactivate_locked_super(sb);
	return root;
out:
	/* Failure before sget(): we still own cifs_sb and its context */
	kfree(cifs_sb->prepath);
	smb3_cleanup_fs_context(cifs_sb->ctx);
	kfree(cifs_sb);
	return root;
}
1105 
/*
 * ->llseek: for whence values that depend on the file size (SEEK_END,
 * SEEK_DATA, SEEK_HOLE) the cached length is revalidated against the
 * server first; a protocol-specific llseek is used when available.
 */
static loff_t cifs_llseek(struct file *file, loff_t offset, int whence)
{
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_tcon *tcon;

	/*
	 * whence == SEEK_END || SEEK_DATA || SEEK_HOLE => we must revalidate
	 * the cached file length
	 */
	if (whence != SEEK_SET && whence != SEEK_CUR) {
		int rc;
		struct inode *inode = file_inode(file);

		/*
		 * We need to be sure that all dirty pages are written and the
		 * server has the newest file length.
		 */
		if (!CIFS_CACHE_READ(CIFS_I(inode)) && inode->i_mapping &&
		    inode->i_mapping->nrpages != 0) {
			rc = filemap_fdatawait(inode->i_mapping);
			if (rc) {
				mapping_set_error(inode->i_mapping, rc);
				return rc;
			}
		}
		/*
		 * Some applications poll for the file length in this strange
		 * way so we must seek to end on non-oplocked files by
		 * setting the revalidate time to zero.
		 */
		CIFS_I(inode)->time = 0;

		rc = cifs_revalidate_file_attr(file);
		if (rc < 0)
			return (loff_t)rc;
	}
	/* Prefer the dialect-specific llseek when the server ops provide one */
	if (cfile && cfile->tlink) {
		tcon = tlink_tcon(cfile->tlink);
		if (tcon->ses->server->ops->llseek)
			return tcon->ses->server->ops->llseek(file, tcon,
							      offset, whence);
	}
	return generic_file_llseek(file, offset, whence);
}
1150 
/*
 * ->setlease: only grant a local lease when the oplock/lease state cached
 * for the inode (or the local_lease mount option) justifies it.
 */
static int
cifs_setlease(struct file *file, int arg, struct file_lease **lease, void **priv)
{
	/*
	 * Note that this is called by vfs setlease with i_lock held to
	 * protect *lease from going away.
	 */
	struct inode *inode = file_inode(file);
	struct cifsFileInfo *cfile = file->private_data;

	/* Check if file is oplocked if this is request for new lease */
	if (arg == F_UNLCK ||
	    ((arg == F_RDLCK) && CIFS_CACHE_READ(CIFS_I(inode))) ||
	    ((arg == F_WRLCK) && CIFS_CACHE_WRITE(CIFS_I(inode))))
		return generic_setlease(file, arg, lease, priv);
	else if (tlink_tcon(cfile->tlink)->local_lease &&
		 !CIFS_CACHE_READ(CIFS_I(inode)))
		/*
		 * If the server claims to support oplock on this file, then we
		 * still need to check oplock even if the local_lease mount
		 * option is set, but there are servers which do not support
		 * oplock for which this mount option may be useful if the user
		 * knows that the file won't be changed on the server by anyone
		 * else.
		 */
		return generic_setlease(file, arg, lease, priv);
	else
		/* No matching cached oplock state: refuse the lease */
		return -EAGAIN;
}
1180 
/* Filesystem type registered for "mount -t cifs" */
struct file_system_type cifs_fs_type = {
	.owner = THIS_MODULE,
	.name = "cifs",
	.init_fs_context = smb3_init_fs_context,
	.parameters = smb3_fs_parameters,
	.kill_sb = cifs_kill_sb,
	.fs_flags = FS_RENAME_DOES_D_MOVE,
};
MODULE_ALIAS_FS("cifs");
1190 
/* Filesystem type registered for "mount -t smb3"; shares the cifs plumbing */
struct file_system_type smb3_fs_type = {
	.owner = THIS_MODULE,
	.name = "smb3",
	.init_fs_context = smb3_init_fs_context,
	.parameters = smb3_fs_parameters,
	.kill_sb = cifs_kill_sb,
	.fs_flags = FS_RENAME_DOES_D_MOVE,
};
MODULE_ALIAS_FS("smb3");
MODULE_ALIAS("smb3");
1201 
/* Inode operations for directories */
const struct inode_operations cifs_dir_inode_ops = {
	.create = cifs_create,
	.atomic_open = cifs_atomic_open,
	.tmpfile = cifs_tmpfile,
	.lookup = cifs_lookup,
	.getattr = cifs_getattr,
	.unlink = cifs_unlink,
	.link = cifs_hardlink,
	.mkdir = cifs_mkdir,
	.rmdir = cifs_rmdir,
	.rename = cifs_rename2,
	.permission = cifs_permission,
	.setattr = cifs_setattr,
	.symlink = cifs_symlink,
	.mknod   = cifs_mknod,
	.listxattr = cifs_listxattr,
	.get_acl = cifs_get_acl,
	.set_acl = cifs_set_acl,
};
1221 
/* Inode operations for regular files */
const struct inode_operations cifs_file_inode_ops = {
	.setattr = cifs_setattr,
	.getattr = cifs_getattr,
	.permission = cifs_permission,
	.listxattr = cifs_listxattr,
	.fiemap = cifs_fiemap,
	.get_acl = cifs_get_acl,
	.set_acl = cifs_set_acl,
};
1231 
cifs_get_link(struct dentry * dentry,struct inode * inode,struct delayed_call * done)1232 const char *cifs_get_link(struct dentry *dentry, struct inode *inode,
1233 			    struct delayed_call *done)
1234 {
1235 	char *target_path;
1236 
1237 	if (!dentry)
1238 		return ERR_PTR(-ECHILD);
1239 
1240 	target_path = kmalloc(PATH_MAX, GFP_KERNEL);
1241 	if (!target_path)
1242 		return ERR_PTR(-ENOMEM);
1243 
1244 	spin_lock(&inode->i_lock);
1245 	if (likely(CIFS_I(inode)->symlink_target)) {
1246 		strscpy(target_path, CIFS_I(inode)->symlink_target, PATH_MAX);
1247 	} else {
1248 		kfree(target_path);
1249 		target_path = ERR_PTR(-EOPNOTSUPP);
1250 	}
1251 	spin_unlock(&inode->i_lock);
1252 
1253 	if (!IS_ERR(target_path))
1254 		set_delayed_call(done, kfree_link, target_path);
1255 
1256 	return target_path;
1257 }
1258 
/* Inode operations for symbolic links */
const struct inode_operations cifs_symlink_inode_ops = {
	.get_link = cifs_get_link,
	.setattr = cifs_setattr,
	.permission = cifs_permission,
	.listxattr = cifs_listxattr,
};
1265 
/*
 * Advance the EOF marker to after the source range.
 */
static int cifs_precopy_set_eof(struct inode *src_inode, struct cifsInodeInfo *src_cifsi,
				struct cifs_tcon *src_tcon,
				unsigned int xid, loff_t src_end)
{
	struct cifsFileInfo *writeable_srcfile;
	int rc = -EINVAL;	/* no writable handle found */

	writeable_srcfile = find_writable_file(src_cifsi, FIND_FSUID_ONLY);
	if (writeable_srcfile) {
		if (src_tcon->ses->server->ops->set_file_size)
			rc = src_tcon->ses->server->ops->set_file_size(
				xid, src_tcon, writeable_srcfile,
				src_inode->i_size, true /* no need to set sparse */);
		else
			rc = -ENOSYS;
		cifsFileInfo_put(writeable_srcfile);
		cifs_dbg(FYI, "SetFSize for copychunk rc = %d\n", rc);
	}

	if (rc < 0)
		goto set_failed;

	/* Mirror the new size into the netfs and fscache views of the inode */
	netfs_resize_file(&src_cifsi->netfs, src_end, true);
	fscache_resize_cookie(cifs_inode_cookie(src_inode), src_end);
	return 0;

set_failed:
	/*
	 * NOTE(review): on set-EOF failure we fall back to flushing the
	 * source mapping and return that result, which may be 0 — the
	 * original rc is not propagated.  Looks deliberate (best-effort),
	 * but confirm callers rely on this behavior.
	 */
	return filemap_write_and_wait(src_inode->i_mapping);
}
1298 
/*
 * Flush out either the folio that overlaps the beginning of a range in which
 * pos resides or the folio that overlaps the end of a range unless that folio
 * is entirely within the range we're going to invalidate.  We extend the flush
 * bounds to encompass the folio.
 */
static int cifs_flush_folio(struct inode *inode, loff_t pos, loff_t *_fstart, loff_t *_fend,
			    bool first)
{
	struct folio *folio;
	unsigned long long fpos, fend;
	pgoff_t index = pos / PAGE_SIZE;
	size_t size;
	int rc = 0;

	folio = filemap_get_folio(inode->i_mapping, index);
	if (IS_ERR(folio))
		return 0;	/* nothing cached at pos: nothing to flush */

	size = folio_size(folio);
	fpos = folio_pos(folio);
	fend = fpos + size - 1;
	/* Widen the caller's flush window to cover the whole folio */
	*_fstart = min_t(unsigned long long, *_fstart, fpos);
	*_fend   = max_t(unsigned long long, *_fend, fend);
	/* Folio is entirely inside the range to be invalidated: skip flush */
	if ((first && pos == fpos) || (!first && pos == fend))
		goto out;

	rc = filemap_write_and_wait_range(inode->i_mapping, fpos, fend);
out:
	folio_put(folio);
	return rc;
}
1331 
/*
 * ->remap_file_range: server-side clone of a byte range via the
 * duplicate_extents server op.  Dedup is not supported.  Returns len on
 * success or a negative errno.
 */
static loff_t cifs_remap_file_range(struct file *src_file, loff_t off,
		struct file *dst_file, loff_t destoff, loff_t len,
		unsigned int remap_flags)
{
	struct inode *src_inode = file_inode(src_file);
	struct inode *target_inode = file_inode(dst_file);
	struct cifsInodeInfo *src_cifsi = CIFS_I(src_inode);
	struct cifsInodeInfo *target_cifsi = CIFS_I(target_inode);
	struct cifsFileInfo *smb_file_src = src_file->private_data;
	struct cifsFileInfo *smb_file_target = dst_file->private_data;
	struct cifs_tcon *target_tcon, *src_tcon;
	unsigned long long destend, fstart, fend, old_size, new_size;
	unsigned int xid;
	int rc;

	if (remap_flags & REMAP_FILE_DEDUP)
		return -EOPNOTSUPP;
	if (remap_flags & ~REMAP_FILE_ADVISORY)
		return -EINVAL;

	cifs_dbg(FYI, "clone range\n");

	xid = get_xid();

	if (!smb_file_src || !smb_file_target) {
		rc = -EBADF;
		cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
		goto out;
	}

	src_tcon = tlink_tcon(smb_file_src->tlink);
	target_tcon = tlink_tcon(smb_file_target->tlink);

	/*
	 * Note: cifs case is easier than btrfs since server responsible for
	 * checks for proper open modes and file type and if it wants
	 * server could even support copy of range where source = target
	 */
	lock_two_nondirectories(target_inode, src_inode);

	/* len == 0 means "clone to the end of the source file" */
	if (len == 0)
		len = src_inode->i_size - off;

	cifs_dbg(FYI, "clone range\n");

	/* Flush the source buffer */
	rc = filemap_write_and_wait_range(src_inode->i_mapping, off,
					  off + len - 1);
	if (rc)
		goto unlock;

	/* The server-side copy will fail if the source crosses the EOF marker.
	 * Advance the EOF marker after the flush above to the end of the range
	 * if it's short of that.
	 */
	if (src_cifsi->netfs.remote_i_size < off + len) {
		rc = cifs_precopy_set_eof(src_inode, src_cifsi, src_tcon, xid, off + len);
		if (rc < 0)
			goto unlock;
	}

	new_size = destoff + len;
	destend = destoff + len - 1;

	/* Flush the folios at either end of the destination range to prevent
	 * accidental loss of dirty data outside of the range.
	 */
	fstart = destoff;
	fend = destend;

	rc = cifs_flush_folio(target_inode, destoff, &fstart, &fend, true);
	if (rc)
		goto unlock;
	rc = cifs_flush_folio(target_inode, destend, &fstart, &fend, false);
	if (rc)
		goto unlock;
	if (fend > target_cifsi->netfs.zero_point)
		target_cifsi->netfs.zero_point = fend + 1;
	old_size = target_cifsi->netfs.remote_i_size;

	/* Discard all the folios that overlap the destination region. */
	cifs_dbg(FYI, "about to discard pages %llx-%llx\n", fstart, fend);
	truncate_inode_pages_range(&target_inode->i_data, fstart, fend);

	fscache_invalidate(cifs_inode_cookie(target_inode), NULL,
			   i_size_read(target_inode), 0);

	rc = -EOPNOTSUPP;
	if (target_tcon->ses->server->ops->duplicate_extents) {
		rc = target_tcon->ses->server->ops->duplicate_extents(xid,
			smb_file_src, smb_file_target, off, len, destoff);
		if (rc == 0 && new_size > old_size) {
			/* The clone extended the target: grow the local size */
			truncate_setsize(target_inode, new_size);
			fscache_resize_cookie(cifs_inode_cookie(target_inode),
					      new_size);
		} else if (rc == -EOPNOTSUPP) {
			/*
			 * copy_file_range syscall man page indicates EINVAL
			 * is returned e.g when "fd_in and fd_out refer to the
			 * same file and the source and target ranges overlap."
			 * Test generic/157 was what showed these cases where
			 * we need to remap EOPNOTSUPP to EINVAL
			 */
			if (off >= src_inode->i_size) {
				rc = -EINVAL;
			} else if (src_inode == target_inode) {
				if (off + len > destoff)
					rc = -EINVAL;
			}
		}
		if (rc == 0 && new_size > target_cifsi->netfs.zero_point)
			target_cifsi->netfs.zero_point = new_size;
	}

	/* force revalidate of size and timestamps of target file now
	   that target is updated on the server */
	CIFS_I(target_inode)->time = 0;
unlock:
	/* although unlocking in the reverse order from locking is not
	   strictly necessary here it is a little cleaner to be consistent */
	unlock_two_nondirectories(src_inode, target_inode);
out:
	free_xid(xid);
	return rc < 0 ? rc : len;
}
1457 
/*
 * Server-side copy of a byte range using the copychunk_range server op.
 * Both files must be on the same SMB session.  Returns bytes copied or a
 * negative errno (-EXDEV / -EOPNOTSUPP let the caller fall back).
 */
ssize_t cifs_file_copychunk_range(unsigned int xid,
				struct file *src_file, loff_t off,
				struct file *dst_file, loff_t destoff,
				size_t len, unsigned int flags)
{
	struct inode *src_inode = file_inode(src_file);
	struct inode *target_inode = file_inode(dst_file);
	struct cifsInodeInfo *src_cifsi = CIFS_I(src_inode);
	struct cifsInodeInfo *target_cifsi = CIFS_I(target_inode);
	struct cifsFileInfo *smb_file_src;
	struct cifsFileInfo *smb_file_target;
	struct cifs_tcon *src_tcon;
	struct cifs_tcon *target_tcon;
	ssize_t rc;

	cifs_dbg(FYI, "copychunk range\n");

	if (!src_file->private_data || !dst_file->private_data) {
		rc = -EBADF;
		cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
		goto out;
	}

	rc = -EXDEV;
	smb_file_target = dst_file->private_data;
	smb_file_src = src_file->private_data;
	src_tcon = tlink_tcon(smb_file_src->tlink);
	target_tcon = tlink_tcon(smb_file_target->tlink);

	/* Server-side copy requires both ends on the same session */
	if (src_tcon->ses != target_tcon->ses) {
		cifs_dbg(FYI, "source and target of copy not on same server\n");
		goto out;
	}

	rc = -EOPNOTSUPP;
	if (!target_tcon->ses->server->ops->copychunk_range)
		goto out;

	/*
	 * Note: cifs case is easier than btrfs since server responsible for
	 * checks for proper open modes and file type and if it wants
	 * server could even support copy of range where source = target
	 */
	lock_two_nondirectories(target_inode, src_inode);

	cifs_dbg(FYI, "about to flush pages\n");

	/* Push dirty source pages so the server copies current data */
	rc = filemap_write_and_wait_range(src_inode->i_mapping, off,
					  off + len - 1);
	if (rc)
		goto unlock;

	/* The server-side copy will fail if the source crosses the EOF marker.
	 * Advance the EOF marker after the flush above to the end of the range
	 * if it's short of that.
	 */
	if (src_cifsi->netfs.remote_i_size < off + len) {
		rc = cifs_precopy_set_eof(src_inode, src_cifsi, src_tcon, xid, off + len);
		if (rc < 0)
			goto unlock;
	}

	/* Flush and invalidate all the folios in the destination region.  If
	 * the copy was successful, then some of the flush is extra overhead,
	 * but we need to allow for the copy failing in some way (eg. ENOSPC).
	 */
	rc = filemap_invalidate_inode(target_inode, true, destoff, destoff + len - 1);
	if (rc)
		goto unlock;

	fscache_invalidate(cifs_inode_cookie(target_inode), NULL,
			   i_size_read(target_inode), 0);

	rc = file_modified(dst_file);
	if (!rc) {
		rc = target_tcon->ses->server->ops->copychunk_range(xid,
			smb_file_src, smb_file_target, off, len, destoff);
		/* rc > 0 is bytes copied; grow the target if we wrote past EOF */
		if (rc > 0 && destoff + rc > i_size_read(target_inode)) {
			truncate_setsize(target_inode, destoff + rc);
			netfs_resize_file(&target_cifsi->netfs,
					  i_size_read(target_inode), true);
			fscache_resize_cookie(cifs_inode_cookie(target_inode),
					      i_size_read(target_inode));
		}
		if (rc > 0 && destoff + rc > target_cifsi->netfs.zero_point)
			target_cifsi->netfs.zero_point = destoff + rc;
	}

	file_accessed(src_file);

	/* force revalidate of size and timestamps of target file now
	 * that target is updated on the server
	 */
	CIFS_I(target_inode)->time = 0;

unlock:
	/* although unlocking in the reverse order from locking is not
	 * strictly necessary here it is a little cleaner to be consistent
	 */
	unlock_two_nondirectories(src_inode, target_inode);

out:
	return rc;
}
1562 
/*
 * Directory operations under CIFS/SMB2/SMB3 are synchronous, so fsync()
 * is a dummy operation.
 */
static int cifs_dir_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	cifs_dbg(FYI, "Sync directory - name: %pD datasync: 0x%x\n",
		 file, datasync);

	/* nothing buffered locally for directories: report success */
	return 0;
}
1574 
cifs_copy_file_range(struct file * src_file,loff_t off,struct file * dst_file,loff_t destoff,size_t len,unsigned int flags)1575 static ssize_t cifs_copy_file_range(struct file *src_file, loff_t off,
1576 				struct file *dst_file, loff_t destoff,
1577 				size_t len, unsigned int flags)
1578 {
1579 	unsigned int xid = get_xid();
1580 	ssize_t rc;
1581 	struct cifsFileInfo *cfile = dst_file->private_data;
1582 
1583 	if (cfile->swapfile) {
1584 		rc = -EOPNOTSUPP;
1585 		free_xid(xid);
1586 		return rc;
1587 	}
1588 
1589 	rc = cifs_file_copychunk_range(xid, src_file, off, dst_file, destoff,
1590 					len, flags);
1591 	free_xid(xid);
1592 
1593 	if (rc == -EOPNOTSUPP || rc == -EXDEV)
1594 		rc = splice_copy_file_range(src_file, off, dst_file,
1595 					    destoff, len);
1596 	return rc;
1597 }
1598 
/* Default file ops: cached I/O with byte-range lock (.lock/.flock) support */
const struct file_operations cifs_file_ops = {
	.read_iter = cifs_loose_read_iter,
	.write_iter = cifs_file_write_iter,
	.open = cifs_open,
	.release = cifs_close,
	.lock = cifs_lock,
	.flock = cifs_flock,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap_prepare = cifs_file_mmap_prepare,
	.splice_read = filemap_splice_read,
	.splice_write = iter_file_splice_write,
	.llseek = cifs_llseek,
	.unlocked_ioctl	= cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};
1618 
/* As cifs_file_ops, with strict-cache read/write/fsync/mmap variants */
const struct file_operations cifs_file_strict_ops = {
	.read_iter = cifs_strict_readv,
	.write_iter = cifs_strict_writev,
	.open = cifs_open,
	.release = cifs_close,
	.lock = cifs_lock,
	.flock = cifs_flock,
	.fsync = cifs_strict_fsync,
	.flush = cifs_flush,
	.mmap_prepare = cifs_file_strict_mmap_prepare,
	.splice_read = filemap_splice_read,
	.splice_write = iter_file_splice_write,
	.llseek = cifs_llseek,
	.unlocked_ioctl	= cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};
1638 
/* Unbuffered (direct) I/O through netfs, with byte-range lock support */
const struct file_operations cifs_file_direct_ops = {
	.read_iter = netfs_unbuffered_read_iter,
	.write_iter = netfs_file_write_iter,
	.open = cifs_open,
	.release = cifs_close,
	.lock = cifs_lock,
	.flock = cifs_flock,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap_prepare = cifs_file_mmap_prepare,
	.splice_read = copy_splice_read,
	.splice_write = iter_file_splice_write,
	.unlocked_ioctl  = cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.llseek = cifs_llseek,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};
1658 
/* As cifs_file_ops but with no byte-range lock methods (.lock/.flock) */
const struct file_operations cifs_file_nobrl_ops = {
	.read_iter = cifs_loose_read_iter,
	.write_iter = cifs_file_write_iter,
	.open = cifs_open,
	.release = cifs_close,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap_prepare = cifs_file_mmap_prepare,
	.splice_read = filemap_splice_read,
	.splice_write = iter_file_splice_write,
	.llseek = cifs_llseek,
	.unlocked_ioctl	= cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};
1676 
/* Strict-cache variants without byte-range lock methods */
const struct file_operations cifs_file_strict_nobrl_ops = {
	.read_iter = cifs_strict_readv,
	.write_iter = cifs_strict_writev,
	.open = cifs_open,
	.release = cifs_close,
	.fsync = cifs_strict_fsync,
	.flush = cifs_flush,
	.mmap_prepare = cifs_file_strict_mmap_prepare,
	.splice_read = filemap_splice_read,
	.splice_write = iter_file_splice_write,
	.llseek = cifs_llseek,
	.unlocked_ioctl	= cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};
1694 
/* Unbuffered (direct) I/O variants without byte-range lock methods */
const struct file_operations cifs_file_direct_nobrl_ops = {
	.read_iter = netfs_unbuffered_read_iter,
	.write_iter = netfs_file_write_iter,
	.open = cifs_open,
	.release = cifs_close,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap_prepare = cifs_file_mmap_prepare,
	.splice_read = copy_splice_read,
	.splice_write = iter_file_splice_write,
	.unlocked_ioctl  = cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.llseek = cifs_llseek,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};
1712 
/* File operations for directories */
const struct file_operations cifs_dir_ops = {
	.iterate_shared = cifs_readdir,
	.release = cifs_closedir,
	.read    = generic_read_dir,
	.unlocked_ioctl  = cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.llseek = generic_file_llseek,
	.fsync = cifs_dir_fsync,
};
1723 
/*
 * Slab constructor for cifsInodeInfo objects.  Runs once per object when
 * a slab page is allocated (not on every inode allocation), so only
 * one-time initialization belongs here.
 */
static void
cifs_init_once(void *inode)
{
	struct cifsInodeInfo *cifsi = inode;

	inode_init_once(&cifsi->netfs.inode);
	init_rwsem(&cifsi->lock_sem);
}
1732 
1733 static int __init
cifs_init_inodecache(void)1734 cifs_init_inodecache(void)
1735 {
1736 	cifs_inode_cachep = kmem_cache_create("cifs_inode_cache",
1737 					      sizeof(struct cifsInodeInfo),
1738 					      0, (SLAB_RECLAIM_ACCOUNT|
1739 						SLAB_ACCOUNT),
1740 					      cifs_init_once);
1741 	if (cifs_inode_cachep == NULL)
1742 		return -ENOMEM;
1743 
1744 	return 0;
1745 }
1746 
/* Destroy the cifsInodeInfo slab cache at module unload. */
static void
cifs_destroy_inodecache(void)
{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(cifs_inode_cachep);
}
1757 
/*
 * Create the slab caches and mempools for large and small SMB
 * request/response buffers, clamping the related module parameters to
 * sane ranges first.  Returns 0 or -ENOMEM (with everything unwound).
 */
static int
cifs_init_request_bufs(void)
{
	/*
	 * SMB2 maximum header size is bigger than CIFS one - no problems to
	 * allocate some more bytes for CIFS.
	 */
	size_t max_hdr_size = MAX_SMB2_HDR_SIZE;

	/* Clamp the buffer-size module parameter to [8192, 127K] */
	if (CIFSMaxBufSize < 8192) {
	/* Buffer size can not be smaller than 2 * PATH_MAX since maximum
	Unicode path name has to fit in any SMB/CIFS path based frames */
		CIFSMaxBufSize = 8192;
	} else if (CIFSMaxBufSize > 1024*127) {
		CIFSMaxBufSize = 1024 * 127;
	} else {
		CIFSMaxBufSize &= 0x1FE00; /* Round size to even 512 byte mult*/
	}
/*
	cifs_dbg(VFS, "CIFSMaxBufSize %d 0x%x\n",
		 CIFSMaxBufSize, CIFSMaxBufSize);
*/
	/* usercopy whitelist spans the whole buffer */
	cifs_req_cachep = kmem_cache_create_usercopy("cifs_request",
					    CIFSMaxBufSize + max_hdr_size, 0,
					    SLAB_HWCACHE_ALIGN, 0,
					    CIFSMaxBufSize + max_hdr_size,
					    NULL);
	if (cifs_req_cachep == NULL)
		return -ENOMEM;

	/* Clamp minimum large-buffer pool size to [1, 64] */
	if (cifs_min_rcv < 1)
		cifs_min_rcv = 1;
	else if (cifs_min_rcv > 64) {
		cifs_min_rcv = 64;
		cifs_dbg(VFS, "cifs_min_rcv set to maximum (64)\n");
	}

	cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv,
						  cifs_req_cachep);

	if (cifs_req_poolp == NULL) {
		kmem_cache_destroy(cifs_req_cachep);
		return -ENOMEM;
	}
	/* MAX_CIFS_SMALL_BUFFER_SIZE bytes is enough for most SMB responses and
	almost all handle based requests (but not write response, nor is it
	sufficient for path based requests).  A smaller size would have
	been more efficient (compacting multiple slab items on one 4k page)
	for the case in which debug was on, but this larger size allows
	more SMBs to use small buffer alloc and is still much more
	efficient to alloc 1 per page off the slab compared to 17K (5page)
	alloc of large cifs buffers even when page debugging is on */
	cifs_sm_req_cachep = kmem_cache_create_usercopy("cifs_small_rq",
			MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
			0, MAX_CIFS_SMALL_BUFFER_SIZE, NULL);
	if (cifs_sm_req_cachep == NULL) {
		mempool_destroy(cifs_req_poolp);
		kmem_cache_destroy(cifs_req_cachep);
		return -ENOMEM;
	}

	/* Clamp minimum small-buffer pool size to [2, 256] */
	if (cifs_min_small < 2)
		cifs_min_small = 2;
	else if (cifs_min_small > 256) {
		cifs_min_small = 256;
		cifs_dbg(FYI, "cifs_min_small set to maximum (256)\n");
	}

	cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small,
						     cifs_sm_req_cachep);

	if (cifs_sm_req_poolp == NULL) {
		/* Unwind everything allocated above */
		mempool_destroy(cifs_req_poolp);
		kmem_cache_destroy(cifs_req_cachep);
		kmem_cache_destroy(cifs_sm_req_cachep);
		return -ENOMEM;
	}

	return 0;
}
1838 
/* Free the request-buffer mempools and their backing slab caches. */
static void
cifs_destroy_request_bufs(void)
{
	mempool_destroy(cifs_req_poolp);
	kmem_cache_destroy(cifs_req_cachep);
	mempool_destroy(cifs_sm_req_poolp);
	kmem_cache_destroy(cifs_sm_req_cachep);
}
1847 
init_mids(void)1848 static int init_mids(void)
1849 {
1850 	cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids",
1851 					    sizeof(struct mid_q_entry), 0,
1852 					    SLAB_HWCACHE_ALIGN, NULL);
1853 	if (cifs_mid_cachep == NULL)
1854 		return -ENOMEM;
1855 
1856 	/* 3 is a reasonable minimum number of simultaneous operations */
1857 	if (mempool_init_slab_pool(&cifs_mid_pool, 3, cifs_mid_cachep) < 0) {
1858 		kmem_cache_destroy(cifs_mid_cachep);
1859 		return -ENOMEM;
1860 	}
1861 
1862 	return 0;
1863 }
1864 
/* Tear down the mid mempool and its backing slab cache. */
static void destroy_mids(void)
{
	mempool_exit(&cifs_mid_pool);
	kmem_cache_destroy(cifs_mid_cachep);
}
1870 
/*
 * Create the slab caches and mempools for netfs I/O requests and
 * subrequests.  On any failure, unwinds whatever was set up (goto chain)
 * and returns -ENOMEM.
 */
static int cifs_init_netfs(void)
{
	cifs_io_request_cachep =
		kmem_cache_create("cifs_io_request",
				  sizeof(struct cifs_io_request), 0,
				  SLAB_HWCACHE_ALIGN, NULL);
	if (!cifs_io_request_cachep)
		goto nomem_req;

	/* Guarantee a minimum of 100 preallocatable request objects */
	if (mempool_init_slab_pool(&cifs_io_request_pool, 100, cifs_io_request_cachep) < 0)
		goto nomem_reqpool;

	cifs_io_subrequest_cachep =
		kmem_cache_create("cifs_io_subrequest",
				  sizeof(struct cifs_io_subrequest), 0,
				  SLAB_HWCACHE_ALIGN, NULL);
	if (!cifs_io_subrequest_cachep)
		goto nomem_subreq;

	if (mempool_init_slab_pool(&cifs_io_subrequest_pool, 100, cifs_io_subrequest_cachep) < 0)
		goto nomem_subreqpool;

	return 0;

nomem_subreqpool:
	kmem_cache_destroy(cifs_io_subrequest_cachep);
nomem_subreq:
	mempool_exit(&cifs_io_request_pool);
nomem_reqpool:
	kmem_cache_destroy(cifs_io_request_cachep);
nomem_req:
	return -ENOMEM;
}
1904 
/*
 * Undo cifs_init_netfs(): drain each mempool before destroying the
 * slab cache backing it, subrequests first, then requests.
 */
static void cifs_destroy_netfs(void)
{
	mempool_exit(&cifs_io_subrequest_pool);
	kmem_cache_destroy(cifs_io_subrequest_cachep);
	mempool_exit(&cifs_io_request_pool);
	kmem_cache_destroy(cifs_io_request_cachep);
}
1912 
/*
 * Module load entry point: set up protocol error maps, procfs entries,
 * global counters, workqueues, caches/mempools and optional upcall
 * subsystems, then register the cifs and smb3 filesystem types.
 *
 * On any failure the error-label chain at the bottom unwinds, in
 * reverse order, exactly the steps completed so far.  Returns 0 on
 * success or a negative errno.
 */
static int __init
init_cifs(void)
{
	int rc = 0;

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	rc = smb1_init_maperror();
	if (rc)
		return rc;
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

	rc = smb2_init_maperror();
	if (rc)
		return rc;

	cifs_proc_init();
	INIT_LIST_HEAD(&cifs_tcp_ses_list);
/*
 *  Initialize Global counters
 */
	atomic_set(&sesInfoAllocCount, 0);
	atomic_set(&tconInfoAllocCount, 0);
	atomic_set(&tcpSesNextId, 0);
	atomic_set(&tcpSesAllocCount, 0);
	atomic_set(&tcpSesReconnectCount, 0);
	atomic_set(&tconInfoReconnectCount, 0);

	atomic_set(&buf_alloc_count, 0);
	atomic_set(&small_buf_alloc_count, 0);
#ifdef CONFIG_CIFS_STATS2
	atomic_set(&total_buf_alloc_count, 0);
	atomic_set(&total_small_buf_alloc_count, 0);
	if (slow_rsp_threshold < 1)
		cifs_dbg(FYI, "slow_response_threshold msgs disabled\n");
	else if (slow_rsp_threshold > 32767)
		cifs_dbg(VFS,
		       "slow response threshold set higher than recommended (0 to 32767)\n");
#endif /* CONFIG_CIFS_STATS2 */

	atomic_set(&mid_count, 0);
	GlobalCurrentXid = 0;
	GlobalTotalActiveXid = 0;
	GlobalMaxActiveXid = 0;

	/* per-boot random value used to obfuscate lock ownership hashes */
	cifs_lock_secret = get_random_u32();

	/* Clamp module parameters to their supported ranges */
	if (cifs_max_pending < 2) {
		cifs_max_pending = 2;
		cifs_dbg(FYI, "cifs_max_pending set to min of 2\n");
	} else if (cifs_max_pending > CIFS_MAX_REQ) {
		cifs_max_pending = CIFS_MAX_REQ;
		cifs_dbg(FYI, "cifs_max_pending set to max of %u\n",
			 CIFS_MAX_REQ);
	}

	/* Limit max to about 18 hours, and setting to zero disables directory entry caching */
	if (dir_cache_timeout > 65000) {
		dir_cache_timeout = 65000;
		cifs_dbg(VFS, "dir_cache_timeout set to max of 65000 seconds\n");
	}

	/* Workqueues: general I/O first; each later failure unwinds the earlier ones */
	cifsiod_wq = alloc_workqueue("cifsiod",
				     WQ_FREEZABLE | WQ_MEM_RECLAIM | WQ_PERCPU,
				     0);
	if (!cifsiod_wq) {
		rc = -ENOMEM;
		goto out_clean_proc;
	}

	/*
	 * Consider in future setting limit!=0 maybe to min(num_of_cores - 1, 3)
	 * so that we don't launch too many worker threads but
	 * Documentation/core-api/workqueue.rst recommends setting it to 0
	 */

	/* WQ_UNBOUND allows decrypt tasks to run on any CPU */
	decrypt_wq = alloc_workqueue("smb3decryptd",
				     WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
	if (!decrypt_wq) {
		rc = -ENOMEM;
		goto out_destroy_cifsiod_wq;
	}

	fileinfo_put_wq = alloc_workqueue("cifsfileinfoput",
				     WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
	if (!fileinfo_put_wq) {
		rc = -ENOMEM;
		goto out_destroy_decrypt_wq;
	}

	cifsoplockd_wq = alloc_workqueue("cifsoplockd",
					 WQ_FREEZABLE | WQ_MEM_RECLAIM | WQ_PERCPU,
					 0);
	if (!cifsoplockd_wq) {
		rc = -ENOMEM;
		goto out_destroy_fileinfo_put_wq;
	}

	deferredclose_wq = alloc_workqueue("deferredclose",
					   WQ_FREEZABLE | WQ_MEM_RECLAIM | WQ_PERCPU,
					   0);
	if (!deferredclose_wq) {
		rc = -ENOMEM;
		goto out_destroy_cifsoplockd_wq;
	}

	serverclose_wq = alloc_workqueue("serverclose",
					   WQ_FREEZABLE | WQ_MEM_RECLAIM | WQ_PERCPU,
					   0);
	if (!serverclose_wq) {
		rc = -ENOMEM;
		goto out_destroy_deferredclose_wq;
	}

	cfid_put_wq = alloc_workqueue("cfid_put_wq",
				      WQ_FREEZABLE | WQ_MEM_RECLAIM | WQ_PERCPU,
				      0);
	if (!cfid_put_wq) {
		rc = -ENOMEM;
		goto out_destroy_serverclose_wq;
	}

	/* Caches and mempools (inodes, netfs I/O, mids, request buffers) */
	rc = cifs_init_inodecache();
	if (rc)
		goto out_destroy_cfid_put_wq;

	rc = cifs_init_netfs();
	if (rc)
		goto out_destroy_inodecache;

	rc = init_mids();
	if (rc)
		goto out_destroy_netfs;

	rc = cifs_init_request_bufs();
	if (rc)
		goto out_destroy_mids;

	/* Optional upcall subsystems, each gated on its own config option */
#ifdef CONFIG_CIFS_DFS_UPCALL
	rc = dfs_cache_init();
	if (rc)
		goto out_destroy_request_bufs;
#endif /* CONFIG_CIFS_DFS_UPCALL */
#ifdef CONFIG_CIFS_UPCALL
	rc = init_cifs_spnego();
	if (rc)
		goto out_destroy_dfs_cache;
#endif /* CONFIG_CIFS_UPCALL */
#ifdef CONFIG_CIFS_SWN_UPCALL
	rc = cifs_genl_init();
	if (rc)
		goto out_register_key_type;
#endif /* CONFIG_CIFS_SWN_UPCALL */

	rc = init_cifs_idmap();
	if (rc)
		goto out_cifs_swn_init;

	/* Finally make both filesystem types visible to mount(2) */
	rc = register_filesystem(&cifs_fs_type);
	if (rc)
		goto out_init_cifs_idmap;

	rc = register_filesystem(&smb3_fs_type);
	if (rc) {
		unregister_filesystem(&cifs_fs_type);
		goto out_init_cifs_idmap;
	}

	return 0;

	/*
	 * Error unwind: each label undoes the step initialized just before
	 * the corresponding goto above.  The labels for optional subsystems
	 * sit inside the matching #ifdef blocks so the chain stays valid
	 * for every config combination.
	 */
out_init_cifs_idmap:
	exit_cifs_idmap();
out_cifs_swn_init:
#ifdef CONFIG_CIFS_SWN_UPCALL
	cifs_genl_exit();
out_register_key_type:
#endif
#ifdef CONFIG_CIFS_UPCALL
	exit_cifs_spnego();
out_destroy_dfs_cache:
#endif
#ifdef CONFIG_CIFS_DFS_UPCALL
	dfs_cache_destroy();
out_destroy_request_bufs:
#endif
	cifs_destroy_request_bufs();
out_destroy_mids:
	destroy_mids();
out_destroy_netfs:
	cifs_destroy_netfs();
out_destroy_inodecache:
	cifs_destroy_inodecache();
out_destroy_cfid_put_wq:
	destroy_workqueue(cfid_put_wq);
out_destroy_serverclose_wq:
	destroy_workqueue(serverclose_wq);
out_destroy_deferredclose_wq:
	destroy_workqueue(deferredclose_wq);
out_destroy_cifsoplockd_wq:
	destroy_workqueue(cifsoplockd_wq);
out_destroy_fileinfo_put_wq:
	destroy_workqueue(fileinfo_put_wq);
out_destroy_decrypt_wq:
	destroy_workqueue(decrypt_wq);
out_destroy_cifsiod_wq:
	destroy_workqueue(cifsiod_wq);
out_clean_proc:
	cifs_proc_clean();
	return rc;
}
2123 
/*
 * Module unload entry point: unregister both filesystem types first so
 * no new mounts can start, then tear down the upcall subsystems,
 * caches/mempools and workqueues created by init_cifs(), and finally
 * remove the procfs entries.
 */
static void __exit
exit_cifs(void)
{
	cifs_dbg(NOISY, "exit_smb3\n");
	unregister_filesystem(&cifs_fs_type);
	unregister_filesystem(&smb3_fs_type);
	cifs_release_automount_timer();
	exit_cifs_idmap();
#ifdef CONFIG_CIFS_SWN_UPCALL
	cifs_genl_exit();
#endif
#ifdef CONFIG_CIFS_UPCALL
	exit_cifs_spnego();
#endif
#ifdef CONFIG_CIFS_DFS_UPCALL
	dfs_cache_destroy();
#endif
	cifs_destroy_request_bufs();
	destroy_mids();
	cifs_destroy_netfs();
	cifs_destroy_inodecache();
	destroy_workqueue(deferredclose_wq);
	destroy_workqueue(cifsoplockd_wq);
	destroy_workqueue(decrypt_wq);
	destroy_workqueue(fileinfo_put_wq);
	destroy_workqueue(serverclose_wq);
	destroy_workqueue(cfid_put_wq);
	destroy_workqueue(cifsiod_wq);
	cifs_proc_clean();
}
2154 
/* Module metadata and the init/exit entry-point registrations. */
MODULE_AUTHOR("Steve French");
MODULE_LICENSE("GPL");	/* combination of LGPL + GPL source behaves as GPL */
MODULE_DESCRIPTION
	("VFS to access SMB3 servers e.g. Samba, Macs, Azure and Windows (and "
	"also older servers complying with the SNIA CIFS Specification)");
MODULE_VERSION(CIFS_VERSION);
/* Hint to userspace module loading: charset and crypto modules we may need */
MODULE_SOFTDEP("nls");
MODULE_SOFTDEP("aes");
MODULE_SOFTDEP("cmac");
MODULE_SOFTDEP("aead2");
MODULE_SOFTDEP("ccm");
MODULE_SOFTDEP("gcm");
module_init(init_cifs)
module_exit(exit_cifs)
2168 module_exit(exit_cifs)
2169