1 // SPDX-License-Identifier: LGPL-2.1
2 /*
3  *
4  *   Copyright (C) International Business Machines  Corp., 2002,2008
5  *   Author(s): Steve French (sfrench@us.ibm.com)
6  *
7  *   Common Internet FileSystem (CIFS) client
8  *
9  */
10 
11 /* Note that BB means BUGBUG (i.e. something to fix eventually) */
12 
13 #include <linux/module.h>
14 #include <linux/fs.h>
15 #include <linux/filelock.h>
16 #include <linux/mount.h>
17 #include <linux/slab.h>
18 #include <linux/init.h>
19 #include <linux/list.h>
20 #include <linux/seq_file.h>
21 #include <linux/vfs.h>
22 #include <linux/mempool.h>
23 #include <linux/delay.h>
24 #include <linux/kthread.h>
25 #include <linux/freezer.h>
26 #include <linux/namei.h>
27 #include <linux/random.h>
28 #include <linux/splice.h>
29 #include <linux/uuid.h>
30 #include <linux/xattr.h>
31 #include <uapi/linux/magic.h>
32 #include <net/ipv6.h>
33 #include "cifsfs.h"
34 #include "cifspdu.h"
35 #define DECLARE_GLOBALS_HERE
36 #include "cifsglob.h"
37 #include "cifsproto.h"
38 #include "cifs_debug.h"
39 #include "cifs_fs_sb.h"
40 #include <linux/mm.h>
41 #include <linux/key-type.h>
42 #include "cifs_spnego.h"
43 #include "fscache.h"
44 #ifdef CONFIG_CIFS_DFS_UPCALL
45 #include "dfs_cache.h"
46 #endif
47 #ifdef CONFIG_CIFS_SWN_UPCALL
48 #include "netlink.h"
49 #endif
50 #include "fs_context.h"
51 #include "cached_dir.h"
52 
53 /*
54  * DOS dates run from 1980/1/1 through 2107/12/31.
55  * Protocol specifications indicate the year offset should only go up
56  * to 119 (i.e. a maximum year of 2099), but that range is not checked here.
57  */
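/*
 * A DOS date packs the year offset from 1980, month and day into 16 bits
 * (7/4/5 bits, matching the shifts below); a DOS time packs hours, minutes
 * and 2-second units (5/6/5 bits), which is why SMB_TIME_MAX ends in 29
 * (29 * 2 = 58 seconds).
 */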
58 #define SMB_DATE_MAX (127<<9 | 12<<5 | 31)
59 #define SMB_DATE_MIN (0<<9 | 1<<5 | 1)
60 #define SMB_TIME_MAX (23<<11 | 59<<5 | 29)
61 
62 int cifsFYI = 0;
63 bool traceSMB;
64 bool enable_oplocks = true;
65 bool linuxExtEnabled = true;
66 bool lookupCacheEnabled = true;
67 bool disable_legacy_dialects; /* false by default */
68 bool enable_gcm_256 = true;
69 bool require_gcm_256; /* false by default */
70 bool enable_negotiate_signing; /* false by default */
71 unsigned int global_secflags = CIFSSEC_DEF;
72 /* unsigned int ntlmv2_support = 0; */
73 
74 /*
75  * Global transaction id (XID) information
76  */
77 unsigned int GlobalCurrentXid;	/* protected by GlobalMid_Lock */
78 unsigned int GlobalTotalActiveXid; /* prot by GlobalMid_Lock */
79 unsigned int GlobalMaxActiveXid;	/* prot by GlobalMid_Lock */
80 spinlock_t GlobalMid_Lock; /* protects above & list operations on midQ entries */
81 
82 /*
83  *  Global counters, updated atomically
84  */
85 atomic_t sesInfoAllocCount;
86 atomic_t tconInfoAllocCount;
87 atomic_t tcpSesNextId;
88 atomic_t tcpSesAllocCount;
89 atomic_t tcpSesReconnectCount;
90 atomic_t tconInfoReconnectCount;
91 
92 atomic_t mid_count;
93 atomic_t buf_alloc_count;
94 atomic_t small_buf_alloc_count;
95 #ifdef CONFIG_CIFS_STATS2
96 atomic_t total_buf_alloc_count;
97 atomic_t total_small_buf_alloc_count;
98 #endif /* STATS2 */
99 struct list_head	cifs_tcp_ses_list;
100 spinlock_t		cifs_tcp_ses_lock;
101 static const struct super_operations cifs_super_ops;
102 unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
103 module_param(CIFSMaxBufSize, uint, 0444);
104 MODULE_PARM_DESC(CIFSMaxBufSize, "Network buffer size (not including header) "
105 				 "for CIFS requests. "
106 				 "Default: 16384 Range: 8192 to 130048");
107 unsigned int cifs_min_rcv = CIFS_MIN_RCV_POOL;
108 module_param(cifs_min_rcv, uint, 0444);
109 MODULE_PARM_DESC(cifs_min_rcv, "Network buffers in pool. Default: 4 Range: "
110 				"1 to 64");
111 unsigned int cifs_min_small = 30;
112 module_param(cifs_min_small, uint, 0444);
113 MODULE_PARM_DESC(cifs_min_small, "Small network buffers in pool. Default: 30 "
114 				 "Range: 2 to 256");
115 unsigned int cifs_max_pending = CIFS_MAX_REQ;
116 module_param(cifs_max_pending, uint, 0444);
117 MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server for "
118 				   "CIFS/SMB1 dialect (N/A for SMB3) "
119 				   "Default: 32767 Range: 2 to 32767.");
120 unsigned int dir_cache_timeout = 30;
121 module_param(dir_cache_timeout, uint, 0644);
122 MODULE_PARM_DESC(dir_cache_timeout, "Number of seconds to cache directory contents for which we have a lease. Default: 30 "
123 				 "Range: 1 to 65000 seconds, 0 to disable caching dir contents");
124 #ifdef CONFIG_CIFS_STATS2
125 unsigned int slow_rsp_threshold = 1;
126 module_param(slow_rsp_threshold, uint, 0644);
127 MODULE_PARM_DESC(slow_rsp_threshold, "Amount of time (in seconds) to wait "
128 				   "before logging that a response is delayed. "
129 				   "Default: 1 (if set to 0 disables msg).");
130 #endif /* STATS2 */
131 
132 module_param(enable_oplocks, bool, 0644);
133 MODULE_PARM_DESC(enable_oplocks, "Enable or disable oplocks. Default: y/Y/1");
134 
135 module_param(enable_gcm_256, bool, 0644);
136 MODULE_PARM_DESC(enable_gcm_256, "Enable requesting strongest (256 bit) GCM encryption. Default: y/Y/1");
137 
138 module_param(require_gcm_256, bool, 0644);
139 MODULE_PARM_DESC(require_gcm_256, "Require strongest (256 bit) GCM encryption. Default: n/N/0");
140 
141 module_param(enable_negotiate_signing, bool, 0644);
142 MODULE_PARM_DESC(enable_negotiate_signing, "Enable negotiating packet signing algorithm with server. Default: n/N/0");
143 
144 module_param(disable_legacy_dialects, bool, 0644);
145 MODULE_PARM_DESC(disable_legacy_dialects, "To improve security it may be "
146 				  "helpful to restrict the ability to "
147 				  "override the default dialects (SMB2.1, "
148 				  "SMB3 and SMB3.02) on mount with old "
149 				  "dialects (CIFS/SMB1 and SMB2) since "
150 				  "vers=1.0 (CIFS/SMB1) and vers=2.0 are weaker"
151 				  " and less secure. Default: n/N/0");
152 
153 struct workqueue_struct	*cifsiod_wq;
154 struct workqueue_struct	*decrypt_wq;
155 struct workqueue_struct	*fileinfo_put_wq;
156 struct workqueue_struct	*cifsoplockd_wq;
157 struct workqueue_struct	*deferredclose_wq;
158 struct workqueue_struct	*serverclose_wq;
159 struct workqueue_struct	*cfid_put_wq;
160 __u32 cifs_lock_secret;
161 
162 /*
163  * Bumps refcount for cifs super block.
164  * Note that it should be only called if a reference to VFS super block is
165  * already held, e.g. in open-type syscalls context. Otherwise it can race with
166  * atomic_dec_and_test in deactivate_locked_super.
167  */
168 void
169 cifs_sb_active(struct super_block *sb)
170 {
171 	struct cifs_sb_info *server = CIFS_SB(sb);
172 
173 	if (atomic_inc_return(&server->active) == 1)
174 		atomic_inc(&sb->s_active);
175 }
176 
177 void
178 cifs_sb_deactive(struct super_block *sb)
179 {
180 	struct cifs_sb_info *server = CIFS_SB(sb);
181 
182 	if (atomic_dec_and_test(&server->active))
183 		deactivate_super(sb);
184 }
185 
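/*
 * Fill in the superblock for a cifs mount: set flags and size limits, pick
 * the timestamp range and granularity supported by the negotiated dialect,
 * size the bdi/readahead from rsize, and instantiate the root inode and
 * dentry.
 */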
186 static int
187 cifs_read_super(struct super_block *sb)
188 {
189 	struct inode *inode;
190 	struct cifs_sb_info *cifs_sb;
191 	struct cifs_tcon *tcon;
192 	struct timespec64 ts;
193 	int rc = 0;
194 
195 	cifs_sb = CIFS_SB(sb);
196 	tcon = cifs_sb_master_tcon(cifs_sb);
197 
198 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIXACL)
199 		sb->s_flags |= SB_POSIXACL;
200 
201 	if (tcon->snapshot_time)
202 		sb->s_flags |= SB_RDONLY;
203 
204 	if (tcon->ses->capabilities & tcon->ses->server->vals->cap_large_files)
205 		sb->s_maxbytes = MAX_LFS_FILESIZE;
206 	else
207 		sb->s_maxbytes = MAX_NON_LFS;
208 
209 	/*
210 	 * Some very old servers, such as DOS and OS/2, used 2 second
211 	 * granularity (while all current servers use 100ns granularity -
212 	 * see MS-DTYP), but 1 second is the coarsest granularity the VFS
213 	 * allows. So set the time granularity to 1 second for those old
214 	 * servers and to 100ns for everything else (current servers).
215 	 */
216 	if ((tcon->ses->server->vals->protocol_id == SMB10_PROT_ID) &&
217 	    ((tcon->ses->capabilities &
218 	      tcon->ses->server->vals->cap_nt_find) == 0) &&
219 	    !tcon->unix_ext) {
220 		sb->s_time_gran = 1000000000; /* 1 second is max allowed gran */
221 		ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MIN), 0, 0);
222 		sb->s_time_min = ts.tv_sec;
223 		ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MAX),
224 				    cpu_to_le16(SMB_TIME_MAX), 0);
225 		sb->s_time_max = ts.tv_sec;
226 	} else {
227 		/*
228 		 * Almost every server, including all SMB2+, uses DCE TIME,
229 		 * i.e. 100-nanosecond units since 1601.  See MS-DTYP and MS-FSCC.
230 		 */
231 		sb->s_time_gran = 100;
232 		ts = cifs_NTtimeToUnix(0);
233 		sb->s_time_min = ts.tv_sec;
234 		ts = cifs_NTtimeToUnix(cpu_to_le64(S64_MAX));
235 		sb->s_time_max = ts.tv_sec;
236 	}
237 
238 	sb->s_magic = CIFS_SUPER_MAGIC;
239 	sb->s_op = &cifs_super_ops;
240 	sb->s_xattr = cifs_xattr_handlers;
241 	rc = super_setup_bdi(sb);
242 	if (rc)
243 		goto out_no_root;
244 	/* tune readahead according to rsize if readahead size not set on mount */
245 	if (cifs_sb->ctx->rsize == 0)
246 		cifs_sb->ctx->rsize =
247 			tcon->ses->server->ops->negotiate_rsize(tcon, cifs_sb->ctx);
248 	if (cifs_sb->ctx->rasize)
249 		sb->s_bdi->ra_pages = cifs_sb->ctx->rasize / PAGE_SIZE;
250 	else
251 		sb->s_bdi->ra_pages = 2 * (cifs_sb->ctx->rsize / PAGE_SIZE);
252 
253 	sb->s_blocksize = CIFS_MAX_MSGSIZE;
254 	sb->s_blocksize_bits = 14;	/* default 2**14 = CIFS_MAX_MSGSIZE */
255 	inode = cifs_root_iget(sb);
256 
257 	if (IS_ERR(inode)) {
258 		rc = PTR_ERR(inode);
259 		goto out_no_root;
260 	}
261 
262 	if (tcon->nocase)
263 		sb->s_d_op = &cifs_ci_dentry_ops;
264 	else
265 		sb->s_d_op = &cifs_dentry_ops;
266 
267 	sb->s_root = d_make_root(inode);
268 	if (!sb->s_root) {
269 		rc = -ENOMEM;
270 		goto out_no_root;
271 	}
272 
273 #ifdef CONFIG_CIFS_NFSD_EXPORT
274 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
275 		cifs_dbg(FYI, "export ops supported\n");
276 		sb->s_export_op = &cifs_export_ops;
277 	}
278 #endif /* CONFIG_CIFS_NFSD_EXPORT */
279 
280 	return 0;
281 
282 out_no_root:
283 	cifs_dbg(VFS, "%s: get root inode failed\n", __func__);
284 	return rc;
285 }
286 
287 static void cifs_kill_sb(struct super_block *sb)
288 {
289 	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
290 
291 	/*
292 	 * We need to release all dentries for the cached directories
293 	 * before we kill the sb.
294 	 */
295 	if (cifs_sb->root) {
296 		close_all_cached_dirs(cifs_sb);
297 
298 		/* finally release root dentry */
299 		dput(cifs_sb->root);
300 		cifs_sb->root = NULL;
301 	}
302 
303 	kill_anon_super(sb);
304 	cifs_umount(cifs_sb);
305 }
306 
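/*
 * Report filesystem statistics for the mount: the name length and fsid come
 * from the tree connection, while the block counts are filled in by the
 * dialect-specific queryfs operation when the server provides one.
 */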
307 static int
308 cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
309 {
310 	struct super_block *sb = dentry->d_sb;
311 	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
312 	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
313 	struct TCP_Server_Info *server = tcon->ses->server;
314 	unsigned int xid;
315 	int rc = 0;
316 	const char *full_path;
317 	void *page;
318 
319 	xid = get_xid();
320 	page = alloc_dentry_path();
321 
322 	full_path = build_path_from_dentry(dentry, page);
323 	if (IS_ERR(full_path)) {
324 		rc = PTR_ERR(full_path);
325 		goto statfs_out;
326 	}
327 
328 	if (le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength) > 0)
329 		buf->f_namelen =
330 		       le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength);
331 	else
332 		buf->f_namelen = PATH_MAX;
333 
334 	buf->f_fsid.val[0] = tcon->vol_serial_number;
335 	/* we use part of the create time for extra randomness; see statfs(2) */
336 	buf->f_fsid.val[1] =  (int)le64_to_cpu(tcon->vol_create_time);
337 
338 	buf->f_files = 0;	/* undefined */
339 	buf->f_ffree = 0;	/* unlimited */
340 
341 	if (server->ops->queryfs)
342 		rc = server->ops->queryfs(xid, tcon, full_path, cifs_sb, buf);
343 
344 statfs_out:
345 	free_dentry_path(page);
346 	free_xid(xid);
347 	return rc;
348 }
349 
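/* Hand fallocate off to the dialect-specific handler, if the server ops provide one */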
350 static long cifs_fallocate(struct file *file, int mode, loff_t off, loff_t len)
351 {
352 	struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
353 	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
354 	struct TCP_Server_Info *server = tcon->ses->server;
355 
356 	if (server->ops->fallocate)
357 		return server->ops->fallocate(file, tcon, mode, off, len);
358 
359 	return -EOPNOTSUPP;
360 }
361 
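/*
 * Permission check for cifs inodes. With the noperm mount option only
 * execute access is checked locally (the server enforces the rest);
 * otherwise fall back to the generic mode-bit based check.
 */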
362 static int cifs_permission(struct mnt_idmap *idmap,
363 			   struct inode *inode, int mask)
364 {
365 	struct cifs_sb_info *cifs_sb;
366 
367 	cifs_sb = CIFS_SB(inode->i_sb);
368 
369 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) {
370 		if ((mask & MAY_EXEC) && !execute_ok(inode))
371 			return -EACCES;
372 		else
373 			return 0;
374 	} else /* file mode might have been restricted at mount time on the
375 		client (above and beyond any ACL on the server), e.g. for
376 		servers which do not support setting and viewing mode bits,
377 		so letting the client check permissions is useful */
378 		return generic_permission(&nop_mnt_idmap, inode, mask);
379 }
380 
381 static struct kmem_cache *cifs_inode_cachep;
382 static struct kmem_cache *cifs_req_cachep;
383 static struct kmem_cache *cifs_mid_cachep;
384 static struct kmem_cache *cifs_sm_req_cachep;
385 static struct kmem_cache *cifs_io_request_cachep;
386 static struct kmem_cache *cifs_io_subrequest_cachep;
387 mempool_t *cifs_sm_req_poolp;
388 mempool_t *cifs_req_poolp;
389 mempool_t *cifs_mid_poolp;
390 mempool_t cifs_io_request_pool;
391 mempool_t cifs_io_subrequest_pool;
392 
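/* Allocate and initialize the cifs-specific portion of a new in-core inode */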
393 static struct inode *
394 cifs_alloc_inode(struct super_block *sb)
395 {
396 	struct cifsInodeInfo *cifs_inode;
397 	cifs_inode = alloc_inode_sb(sb, cifs_inode_cachep, GFP_KERNEL);
398 	if (!cifs_inode)
399 		return NULL;
400 	cifs_inode->cifsAttrs = ATTR_ARCHIVE;	/* default */
401 	cifs_inode->time = 0;
402 	/*
403 	 * Until the file is open and we have gotten oplock info back from the
404 	 * server, we cannot assume caching of file data or metadata.
405 	 */
406 	cifs_set_oplock_level(cifs_inode, 0);
407 	cifs_inode->lease_granted = false;
408 	cifs_inode->flags = 0;
409 	spin_lock_init(&cifs_inode->writers_lock);
410 	cifs_inode->writers = 0;
411 	cifs_inode->netfs.inode.i_blkbits = 14;  /* 2**14 = CIFS_MAX_MSGSIZE */
412 	cifs_inode->netfs.remote_i_size = 0;
413 	cifs_inode->uniqueid = 0;
414 	cifs_inode->createtime = 0;
415 	cifs_inode->epoch = 0;
416 	spin_lock_init(&cifs_inode->open_file_lock);
417 	generate_random_uuid(cifs_inode->lease_key);
418 	cifs_inode->symlink_target = NULL;
419 
420 	/*
421 	 * Cannot set i_flags here - they get immediately overwritten to zero
422 	 * by the VFS.
423 	 */
424 	/* cifs_inode->netfs.inode.i_flags = S_NOATIME | S_NOCMTIME; */
425 	INIT_LIST_HEAD(&cifs_inode->openFileList);
426 	INIT_LIST_HEAD(&cifs_inode->llist);
427 	INIT_LIST_HEAD(&cifs_inode->deferred_closes);
428 	spin_lock_init(&cifs_inode->deferred_lock);
429 	return &cifs_inode->netfs.inode;
430 }
431 
432 static void
433 cifs_free_inode(struct inode *inode)
434 {
435 	struct cifsInodeInfo *cinode = CIFS_I(inode);
436 
437 	if (S_ISLNK(inode->i_mode))
438 		kfree(cinode->symlink_target);
439 	kmem_cache_free(cifs_inode_cachep, cinode);
440 }
441 
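/* Tear down an inode being evicted: wait for netfs I/O, drop its pages and fscache cookie */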
442 static void
443 cifs_evict_inode(struct inode *inode)
444 {
445 	netfs_wait_for_outstanding_io(inode);
446 	truncate_inode_pages_final(&inode->i_data);
447 	if (inode->i_state & I_PINNING_NETFS_WB)
448 		cifs_fscache_unuse_inode_cookie(inode, true);
449 	cifs_fscache_release_inode_cookie(inode);
450 	clear_inode(inode);
451 }
452 
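/* Emit the addr= (and rdma) mount options for /proc/mounts */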
453 static void
454 cifs_show_address(struct seq_file *s, struct TCP_Server_Info *server)
455 {
456 	struct sockaddr_in *sa = (struct sockaddr_in *) &server->dstaddr;
457 	struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *) &server->dstaddr;
458 
459 	seq_puts(s, ",addr=");
460 
461 	switch (server->dstaddr.ss_family) {
462 	case AF_INET:
463 		seq_printf(s, "%pI4", &sa->sin_addr.s_addr);
464 		break;
465 	case AF_INET6:
466 		seq_printf(s, "%pI6", &sa6->sin6_addr.s6_addr);
467 		if (sa6->sin6_scope_id)
468 			seq_printf(s, "%%%u", sa6->sin6_scope_id);
469 		break;
470 	default:
471 		seq_puts(s, "(unknown)");
472 	}
473 	if (server->rdma)
474 		seq_puts(s, ",rdma");
475 }
476 
477 static void
478 cifs_show_security(struct seq_file *s, struct cifs_ses *ses)
479 {
480 	if (ses->sectype == Unspecified) {
481 		if (ses->user_name == NULL)
482 			seq_puts(s, ",sec=none");
483 		return;
484 	}
485 
486 	seq_puts(s, ",sec=");
487 
488 	switch (ses->sectype) {
489 	case NTLMv2:
490 		seq_puts(s, "ntlmv2");
491 		break;
492 	case Kerberos:
493 		seq_puts(s, "krb5");
494 		break;
495 	case RawNTLMSSP:
496 		seq_puts(s, "ntlmssp");
497 		break;
498 	default:
499 		/* shouldn't ever happen */
500 		seq_puts(s, "unknown");
501 		break;
502 	}
503 
504 	if (ses->sign)
505 		seq_puts(s, "i");
506 
507 	if (ses->sectype == Kerberos)
508 		seq_printf(s, ",cruid=%u",
509 			   from_kuid_munged(&init_user_ns, ses->cred_uid));
510 }
511 
512 static void
513 cifs_show_cache_flavor(struct seq_file *s, struct cifs_sb_info *cifs_sb)
514 {
515 	seq_puts(s, ",cache=");
516 
517 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
518 		seq_puts(s, "strict");
519 	else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO)
520 		seq_puts(s, "none");
521 	else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RW_CACHE)
522 		seq_puts(s, "singleclient"); /* assume only one client access */
523 	else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RO_CACHE)
524 		seq_puts(s, "ro"); /* read only caching assumed */
525 	else
526 		seq_puts(s, "loose");
527 }
528 
529 /*
530  * cifs_show_devname() is used so we show the mount device name with correct
531  * format (e.g. forward slashes vs. back slashes) in /proc/mounts
532  */
533 static int cifs_show_devname(struct seq_file *m, struct dentry *root)
534 {
535 	struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
536 	char *devname = kstrdup(cifs_sb->ctx->source, GFP_KERNEL);
537 
538 	if (devname == NULL)
539 		seq_puts(m, "none");
540 	else {
541 		convert_delimiter(devname, '/');
542 		/* escape all spaces in share names */
543 		seq_escape(m, devname, " \t");
544 		kfree(devname);
545 	}
546 	return 0;
547 }
548 
549 static void
550 cifs_show_upcall_target(struct seq_file *s, struct cifs_sb_info *cifs_sb)
551 {
552 	if (cifs_sb->ctx->upcall_target == UPTARGET_UNSPECIFIED) {
553 		seq_puts(s, ",upcall_target=app");
554 		return;
555 	}
556 
557 	seq_puts(s, ",upcall_target=");
558 
559 	switch (cifs_sb->ctx->upcall_target) {
560 	case UPTARGET_APP:
561 		seq_puts(s, "app");
562 		break;
563 	case UPTARGET_MOUNT:
564 		seq_puts(s, "mount");
565 		break;
566 	default:
567 		/* shouldn't ever happen */
568 		seq_puts(s, "unknown");
569 		break;
570 	}
571 }
572 
573 /*
574  * cifs_show_options() is for displaying mount options in /proc/mounts.
575  * Not all settable options are displayed but most of the important
576  * ones are.
577  */
578 static int
579 cifs_show_options(struct seq_file *s, struct dentry *root)
580 {
581 	struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
582 	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
583 	struct sockaddr *srcaddr;
584 	srcaddr = (struct sockaddr *)&tcon->ses->server->srcaddr;
585 
586 	seq_show_option(s, "vers", tcon->ses->server->vals->version_string);
587 	cifs_show_security(s, tcon->ses);
588 	cifs_show_cache_flavor(s, cifs_sb);
589 	cifs_show_upcall_target(s, cifs_sb);
590 
591 	if (tcon->no_lease)
592 		seq_puts(s, ",nolease");
593 	if (cifs_sb->ctx->multiuser)
594 		seq_puts(s, ",multiuser");
595 	else if (tcon->ses->user_name)
596 		seq_show_option(s, "username", tcon->ses->user_name);
597 
598 	if (tcon->ses->domainName && tcon->ses->domainName[0] != 0)
599 		seq_show_option(s, "domain", tcon->ses->domainName);
600 
601 	if (srcaddr->sa_family != AF_UNSPEC) {
602 		struct sockaddr_in *saddr4;
603 		struct sockaddr_in6 *saddr6;
604 		saddr4 = (struct sockaddr_in *)srcaddr;
605 		saddr6 = (struct sockaddr_in6 *)srcaddr;
606 		if (srcaddr->sa_family == AF_INET6)
607 			seq_printf(s, ",srcaddr=%pI6c",
608 				   &saddr6->sin6_addr);
609 		else if (srcaddr->sa_family == AF_INET)
610 			seq_printf(s, ",srcaddr=%pI4",
611 				   &saddr4->sin_addr.s_addr);
612 		else
613 			seq_printf(s, ",srcaddr=BAD-AF:%i",
614 				   (int)(srcaddr->sa_family));
615 	}
616 
617 	seq_printf(s, ",uid=%u",
618 		   from_kuid_munged(&init_user_ns, cifs_sb->ctx->linux_uid));
619 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID)
620 		seq_puts(s, ",forceuid");
621 	else
622 		seq_puts(s, ",noforceuid");
623 
624 	seq_printf(s, ",gid=%u",
625 		   from_kgid_munged(&init_user_ns, cifs_sb->ctx->linux_gid));
626 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID)
627 		seq_puts(s, ",forcegid");
628 	else
629 		seq_puts(s, ",noforcegid");
630 
631 	cifs_show_address(s, tcon->ses->server);
632 
633 	if (!tcon->unix_ext)
634 		seq_printf(s, ",file_mode=0%ho,dir_mode=0%ho",
635 					   cifs_sb->ctx->file_mode,
636 					   cifs_sb->ctx->dir_mode);
637 	if (cifs_sb->ctx->iocharset)
638 		seq_printf(s, ",iocharset=%s", cifs_sb->ctx->iocharset);
639 	if (tcon->ses->unicode == 0)
640 		seq_puts(s, ",nounicode");
641 	else if (tcon->ses->unicode == 1)
642 		seq_puts(s, ",unicode");
643 	if (tcon->seal)
644 		seq_puts(s, ",seal");
645 	else if (tcon->ses->server->ignore_signature)
646 		seq_puts(s, ",signloosely");
647 	if (tcon->nocase)
648 		seq_puts(s, ",nocase");
649 	if (tcon->nodelete)
650 		seq_puts(s, ",nodelete");
651 	if (cifs_sb->ctx->no_sparse)
652 		seq_puts(s, ",nosparse");
653 	if (tcon->local_lease)
654 		seq_puts(s, ",locallease");
655 	if (tcon->retry)
656 		seq_puts(s, ",hard");
657 	else
658 		seq_puts(s, ",soft");
659 	if (tcon->use_persistent)
660 		seq_puts(s, ",persistenthandles");
661 	else if (tcon->use_resilient)
662 		seq_puts(s, ",resilienthandles");
663 	if (tcon->posix_extensions)
664 		seq_puts(s, ",posix");
665 	else if (tcon->unix_ext)
666 		seq_puts(s, ",unix");
667 	else
668 		seq_puts(s, ",nounix");
669 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS)
670 		seq_puts(s, ",nodfs");
671 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)
672 		seq_puts(s, ",posixpaths");
673 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID)
674 		seq_puts(s, ",setuids");
675 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UID_FROM_ACL)
676 		seq_puts(s, ",idsfromsid");
677 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
678 		seq_puts(s, ",serverino");
679 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
680 		seq_puts(s, ",rwpidforward");
681 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL)
682 		seq_puts(s, ",forcemand");
683 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
684 		seq_puts(s, ",nouser_xattr");
685 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR)
686 		seq_puts(s, ",mapchars");
687 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SFM_CHR)
688 		seq_puts(s, ",mapposix");
689 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)
690 		seq_puts(s, ",sfu");
691 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
692 		seq_puts(s, ",nobrl");
693 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_HANDLE_CACHE)
694 		seq_puts(s, ",nohandlecache");
695 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID)
696 		seq_puts(s, ",modefromsid");
697 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL)
698 		seq_puts(s, ",cifsacl");
699 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)
700 		seq_puts(s, ",dynperm");
701 	if (root->d_sb->s_flags & SB_POSIXACL)
702 		seq_puts(s, ",acl");
703 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS)
704 		seq_puts(s, ",mfsymlinks");
705 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_FSCACHE)
706 		seq_puts(s, ",fsc");
707 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)
708 		seq_puts(s, ",nostrictsync");
709 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM)
710 		seq_puts(s, ",noperm");
711 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID)
712 		seq_printf(s, ",backupuid=%u",
713 			   from_kuid_munged(&init_user_ns,
714 					    cifs_sb->ctx->backupuid));
715 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID)
716 		seq_printf(s, ",backupgid=%u",
717 			   from_kgid_munged(&init_user_ns,
718 					    cifs_sb->ctx->backupgid));
719 	seq_show_option(s, "reparse",
720 			cifs_reparse_type_str(cifs_sb->ctx->reparse_type));
721 	if (cifs_sb->ctx->nonativesocket)
722 		seq_puts(s, ",nonativesocket");
723 	else
724 		seq_puts(s, ",nativesocket");
725 	seq_show_option(s, "symlink",
726 			cifs_symlink_type_str(get_cifs_symlink_type(cifs_sb)));
727 
728 	seq_printf(s, ",rsize=%u", cifs_sb->ctx->rsize);
729 	seq_printf(s, ",wsize=%u", cifs_sb->ctx->wsize);
730 	seq_printf(s, ",bsize=%u", cifs_sb->ctx->bsize);
731 	if (cifs_sb->ctx->rasize)
732 		seq_printf(s, ",rasize=%u", cifs_sb->ctx->rasize);
733 	if (tcon->ses->server->min_offload)
734 		seq_printf(s, ",esize=%u", tcon->ses->server->min_offload);
735 	if (tcon->ses->server->retrans)
736 		seq_printf(s, ",retrans=%u", tcon->ses->server->retrans);
737 	seq_printf(s, ",echo_interval=%lu",
738 			tcon->ses->server->echo_interval / HZ);
739 
740 	/* Only display the following if overridden on mount */
741 	if (tcon->ses->server->max_credits != SMB2_MAX_CREDITS_AVAILABLE)
742 		seq_printf(s, ",max_credits=%u", tcon->ses->server->max_credits);
743 	if (tcon->ses->server->tcp_nodelay)
744 		seq_puts(s, ",tcpnodelay");
745 	if (tcon->ses->server->noautotune)
746 		seq_puts(s, ",noautotune");
747 	if (tcon->ses->server->noblocksnd)
748 		seq_puts(s, ",noblocksend");
749 	if (tcon->ses->server->nosharesock)
750 		seq_puts(s, ",nosharesock");
751 
752 	if (tcon->snapshot_time)
753 		seq_printf(s, ",snapshot=%llu", tcon->snapshot_time);
754 	if (tcon->handle_timeout)
755 		seq_printf(s, ",handletimeout=%u", tcon->handle_timeout);
756 	if (tcon->max_cached_dirs != MAX_CACHED_FIDS)
757 		seq_printf(s, ",max_cached_dirs=%u", tcon->max_cached_dirs);
758 
759 	/*
760 	 * Display the file and directory attribute timeouts in seconds.
761 	 * If the file and directory attribute timeouts are the same, then
762 	 * actimeo was likely specified on mount.
763 	 */
764 	if (cifs_sb->ctx->acdirmax == cifs_sb->ctx->acregmax)
765 		seq_printf(s, ",actimeo=%lu", cifs_sb->ctx->acregmax / HZ);
766 	else {
767 		seq_printf(s, ",acdirmax=%lu", cifs_sb->ctx->acdirmax / HZ);
768 		seq_printf(s, ",acregmax=%lu", cifs_sb->ctx->acregmax / HZ);
769 	}
770 	seq_printf(s, ",closetimeo=%lu", cifs_sb->ctx->closetimeo / HZ);
771 
772 	if (tcon->ses->chan_max > 1)
773 		seq_printf(s, ",multichannel,max_channels=%zu",
774 			   tcon->ses->chan_max);
775 
776 	if (tcon->use_witness)
777 		seq_puts(s, ",witness");
778 
779 	return 0;
780 }
781 
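/*
 * Called for umount -f: close deferred file handles and wake any threads
 * blocked on the server queues so a forced unmount can make progress.
 */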
782 static void cifs_umount_begin(struct super_block *sb)
783 {
784 	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
785 	struct cifs_tcon *tcon;
786 
787 	if (cifs_sb == NULL)
788 		return;
789 
790 	tcon = cifs_sb_master_tcon(cifs_sb);
791 
792 	spin_lock(&cifs_tcp_ses_lock);
793 	spin_lock(&tcon->tc_lock);
794 	trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count,
795 			    netfs_trace_tcon_ref_see_umount);
796 	if ((tcon->tc_count > 1) || (tcon->status == TID_EXITING)) {
797 		/* we have other mounts to the same share, or we have
798 		   already tried to umount this and woken up all waiting
799 		   network requests; nothing to do */
800 		spin_unlock(&tcon->tc_lock);
801 		spin_unlock(&cifs_tcp_ses_lock);
802 		return;
803 	}
804 	/*
805 	 * cannot set tcon->status to TID_EXITING yet since we don't know if umount -f will
806 	 * fail later (e.g. due to open files).  TID_EXITING is set just before the tree disconnect request is sent.
807 	 */
808 	spin_unlock(&tcon->tc_lock);
809 	spin_unlock(&cifs_tcp_ses_lock);
810 
811 	cifs_close_all_deferred_files(tcon);
812 	/* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
813 	/* cancel_notify_requests(tcon); */
814 	if (tcon->ses && tcon->ses->server) {
815 		cifs_dbg(FYI, "wake up tasks now - umount begin not complete\n");
816 		wake_up_all(&tcon->ses->server->request_q);
817 		wake_up_all(&tcon->ses->server->response_q);
818 		msleep(1); /* yield */
819 		/* we have to kick the requests once more */
820 		wake_up_all(&tcon->ses->server->response_q);
821 		msleep(1);
822 	}
823 
824 	return;
825 }
826 
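/* Freeze hook: flush deferred closes so no delayed handles linger while the fs is frozen */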
827 static int cifs_freeze(struct super_block *sb)
828 {
829 	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
830 	struct cifs_tcon *tcon;
831 
832 	if (cifs_sb == NULL)
833 		return 0;
834 
835 	tcon = cifs_sb_master_tcon(cifs_sb);
836 
837 	cifs_close_all_deferred_files(tcon);
838 	return 0;
839 }
840 
841 #ifdef CONFIG_CIFS_STATS2
842 static int cifs_show_stats(struct seq_file *s, struct dentry *root)
843 {
844 	/* BB FIXME */
845 	return 0;
846 }
847 #endif
848 
849 static int cifs_write_inode(struct inode *inode, struct writeback_control *wbc)
850 {
851 	return netfs_unpin_writeback(inode, wbc);
852 }
853 
854 static int cifs_drop_inode(struct inode *inode)
855 {
856 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
857 
858 	/* no serverino => unconditional eviction */
859 	return !(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) ||
860 		generic_drop_inode(inode);
861 }
862 
863 static const struct super_operations cifs_super_ops = {
864 	.statfs = cifs_statfs,
865 	.alloc_inode = cifs_alloc_inode,
866 	.write_inode	= cifs_write_inode,
867 	.free_inode = cifs_free_inode,
868 	.drop_inode	= cifs_drop_inode,
869 	.evict_inode	= cifs_evict_inode,
870 /*	.show_path	= cifs_show_path, */ /* Would we ever need show path? */
871 	.show_devname   = cifs_show_devname,
872 /*	.delete_inode	= cifs_delete_inode,  */  /* The above function is not
873 	needed unless we later add lazy close of inodes, or unless the
874 	kernel forgets to call us with the same number of releases (closes)
875 	as opens */
876 	.show_options = cifs_show_options,
877 	.umount_begin   = cifs_umount_begin,
878 	.freeze_fs      = cifs_freeze,
879 #ifdef CONFIG_CIFS_STATS2
880 	.show_stats = cifs_show_stats,
881 #endif
882 };
883 
884 /*
885  * Get the root dentry from the superblock according to the prefix path mount
886  * option. Return a dentry with an extra reference on success, or an ERR_PTR on failure.
887  */
888 static struct dentry *
889 cifs_get_root(struct smb3_fs_context *ctx, struct super_block *sb)
890 {
891 	struct dentry *dentry;
892 	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
893 	char *full_path = NULL;
894 	char *s, *p;
895 	char sep;
896 
897 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)
898 		return dget(sb->s_root);
899 
900 	full_path = cifs_build_path_to_root(ctx, cifs_sb,
901 				cifs_sb_master_tcon(cifs_sb), 0);
902 	if (full_path == NULL)
903 		return ERR_PTR(-ENOMEM);
904 
905 	cifs_dbg(FYI, "Get root dentry for %s\n", full_path);
906 
907 	sep = CIFS_DIR_SEP(cifs_sb);
908 	dentry = dget(sb->s_root);
909 	s = full_path;
910 
911 	do {
912 		struct inode *dir = d_inode(dentry);
913 		struct dentry *child;
914 
915 		if (!S_ISDIR(dir->i_mode)) {
916 			dput(dentry);
917 			dentry = ERR_PTR(-ENOTDIR);
918 			break;
919 		}
920 
921 		/* skip separators */
922 		while (*s == sep)
923 			s++;
924 		if (!*s)
925 			break;
926 		p = s++;
927 		/* next separator */
928 		while (*s && *s != sep)
929 			s++;
930 
931 		child = lookup_noperm_positive_unlocked(&QSTR_LEN(p, s - p),
932 							dentry);
933 		dput(dentry);
934 		dentry = child;
935 	} while (!IS_ERR(dentry));
936 	kfree(full_path);
937 	return dentry;
938 }
939 
940 static int cifs_set_super(struct super_block *sb, void *data)
941 {
942 	struct cifs_mnt_data *mnt_data = data;
943 	sb->s_fs_info = mnt_data->cifs_sb;
944 	return set_anon_super(sb, NULL);
945 }
946 
947 struct dentry *
948 cifs_smb3_do_mount(struct file_system_type *fs_type,
949 	      int flags, struct smb3_fs_context *old_ctx)
950 {
951 	struct cifs_mnt_data mnt_data;
952 	struct cifs_sb_info *cifs_sb;
953 	struct super_block *sb;
954 	struct dentry *root;
955 	int rc;
956 
957 	if (cifsFYI) {
958 		cifs_dbg(FYI, "%s: devname=%s flags=0x%x\n", __func__,
959 			 old_ctx->source, flags);
960 	} else {
961 		cifs_info("Attempting to mount %s\n", old_ctx->source);
962 	}
963 
964 	cifs_sb = kzalloc(sizeof(*cifs_sb), GFP_KERNEL);
965 	if (!cifs_sb)
966 		return ERR_PTR(-ENOMEM);
967 
968 	cifs_sb->ctx = kzalloc(sizeof(struct smb3_fs_context), GFP_KERNEL);
969 	if (!cifs_sb->ctx) {
970 		root = ERR_PTR(-ENOMEM);
971 		goto out;
972 	}
973 	rc = smb3_fs_context_dup(cifs_sb->ctx, old_ctx);
974 	if (rc) {
975 		root = ERR_PTR(rc);
976 		goto out;
977 	}
978 
979 	rc = cifs_setup_cifs_sb(cifs_sb);
980 	if (rc) {
981 		root = ERR_PTR(rc);
982 		goto out;
983 	}
984 
985 	rc = cifs_mount(cifs_sb, cifs_sb->ctx);
986 	if (rc) {
987 		if (!(flags & SB_SILENT))
988 			cifs_dbg(VFS, "cifs_mount failed w/return code = %d\n",
989 				 rc);
990 		root = ERR_PTR(rc);
991 		goto out;
992 	}
993 
994 	mnt_data.ctx = cifs_sb->ctx;
995 	mnt_data.cifs_sb = cifs_sb;
996 	mnt_data.flags = flags;
997 
998 	/* BB should we make this contingent on mount parm? */
999 	flags |= SB_NODIRATIME | SB_NOATIME;
1000 
1001 	sb = sget(fs_type, cifs_match_super, cifs_set_super, flags, &mnt_data);
1002 	if (IS_ERR(sb)) {
1003 		cifs_umount(cifs_sb);
1004 		return ERR_CAST(sb);
1005 	}
1006 
1007 	if (sb->s_root) {
1008 		cifs_dbg(FYI, "Use existing superblock\n");
1009 		cifs_umount(cifs_sb);
1010 		cifs_sb = NULL;
1011 	} else {
1012 		rc = cifs_read_super(sb);
1013 		if (rc) {
1014 			root = ERR_PTR(rc);
1015 			goto out_super;
1016 		}
1017 
1018 		sb->s_flags |= SB_ACTIVE;
1019 	}
1020 
1021 	root = cifs_get_root(cifs_sb ? cifs_sb->ctx : old_ctx, sb);
1022 	if (IS_ERR(root))
1023 		goto out_super;
1024 
1025 	if (cifs_sb)
1026 		cifs_sb->root = dget(root);
1027 
1028 	cifs_dbg(FYI, "dentry root is: %p\n", root);
1029 	return root;
1030 
1031 out_super:
1032 	deactivate_locked_super(sb);
1033 	return root;
1034 out:
1035 	kfree(cifs_sb->prepath);
1036 	smb3_cleanup_fs_context(cifs_sb->ctx);
1037 	kfree(cifs_sb);
1038 	return root;
1039 }
1040 
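/*
 * llseek for cifs files: revalidate the file size from the server when the
 * target depends on EOF (SEEK_END/SEEK_DATA/SEEK_HOLE), then use the dialect
 * llseek op if present, falling back to the generic implementation.
 */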
1041 static loff_t cifs_llseek(struct file *file, loff_t offset, int whence)
1042 {
1043 	struct cifsFileInfo *cfile = file->private_data;
1044 	struct cifs_tcon *tcon;
1045 
1046 	/*
1047 	 * whence == SEEK_END || SEEK_DATA || SEEK_HOLE => we must revalidate
1048 	 * the cached file length
1049 	 */
1050 	if (whence != SEEK_SET && whence != SEEK_CUR) {
1051 		int rc;
1052 		struct inode *inode = file_inode(file);
1053 
1054 		/*
1055 		 * We need to be sure that all dirty pages are written and the
1056 		 * server has the newest file length.
1057 		 */
1058 		if (!CIFS_CACHE_READ(CIFS_I(inode)) && inode->i_mapping &&
1059 		    inode->i_mapping->nrpages != 0) {
1060 			rc = filemap_fdatawait(inode->i_mapping);
1061 			if (rc) {
1062 				mapping_set_error(inode->i_mapping, rc);
1063 				return rc;
1064 			}
1065 		}
1066 		/*
1067 		 * Some applications poll for the file length in this strange
1068 		 * way, so for non-oplocked files we must revalidate before seeking
1069 		 * to the end, which we force by zeroing the revalidate time.
1070 		 */
1071 		CIFS_I(inode)->time = 0;
1072 
1073 		rc = cifs_revalidate_file_attr(file);
1074 		if (rc < 0)
1075 			return (loff_t)rc;
1076 	}
1077 	if (cfile && cfile->tlink) {
1078 		tcon = tlink_tcon(cfile->tlink);
1079 		if (tcon->ses->server->ops->llseek)
1080 			return tcon->ses->server->ops->llseek(file, tcon,
1081 							      offset, whence);
1082 	}
1083 	return generic_file_llseek(file, offset, whence);
1084 }
1085 
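/*
 * Only grant a local lease (setlease) when the client already holds the
 * matching cached oplock/lease state, or when the local_lease mount option
 * explicitly opts in; otherwise return -EAGAIN.
 */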
1086 static int
1087 cifs_setlease(struct file *file, int arg, struct file_lease **lease, void **priv)
1088 {
1089 	/*
1090 	 * Note that this is called by vfs setlease with i_lock held to
1091 	 * protect *lease from going away.
1092 	 */
1093 	struct inode *inode = file_inode(file);
1094 	struct cifsFileInfo *cfile = file->private_data;
1095 
1096 	/* Check if the file is oplocked if this is a request for a new lease */
1097 	if (arg == F_UNLCK ||
1098 	    ((arg == F_RDLCK) && CIFS_CACHE_READ(CIFS_I(inode))) ||
1099 	    ((arg == F_WRLCK) && CIFS_CACHE_WRITE(CIFS_I(inode))))
1100 		return generic_setlease(file, arg, lease, priv);
1101 	else if (tlink_tcon(cfile->tlink)->local_lease &&
1102 		 !CIFS_CACHE_READ(CIFS_I(inode)))
1103 		/*
1104 		 * If the server claims to support oplock on this file, then we
1105 		 * still need to check oplock even if the local_lease mount
1106 		 * option is set, but there are servers which do not support
1107 		 * oplock for which this mount option may be useful if the user
1108 		 * knows that the file won't be changed on the server by anyone
1109 		 * else.
1110 		 */
1111 		return generic_setlease(file, arg, lease, priv);
1112 	else
1113 		return -EAGAIN;
1114 }
1115 
1116 struct file_system_type cifs_fs_type = {
1117 	.owner = THIS_MODULE,
1118 	.name = "cifs",
1119 	.init_fs_context = smb3_init_fs_context,
1120 	.parameters = smb3_fs_parameters,
1121 	.kill_sb = cifs_kill_sb,
1122 	.fs_flags = FS_RENAME_DOES_D_MOVE,
1123 };
1124 MODULE_ALIAS_FS("cifs");
1125 
1126 struct file_system_type smb3_fs_type = {
1127 	.owner = THIS_MODULE,
1128 	.name = "smb3",
1129 	.init_fs_context = smb3_init_fs_context,
1130 	.parameters = smb3_fs_parameters,
1131 	.kill_sb = cifs_kill_sb,
1132 	.fs_flags = FS_RENAME_DOES_D_MOVE,
1133 };
1134 MODULE_ALIAS_FS("smb3");
1135 MODULE_ALIAS("smb3");
1136 
1137 const struct inode_operations cifs_dir_inode_ops = {
1138 	.create = cifs_create,
1139 	.atomic_open = cifs_atomic_open,
1140 	.lookup = cifs_lookup,
1141 	.getattr = cifs_getattr,
1142 	.unlink = cifs_unlink,
1143 	.link = cifs_hardlink,
1144 	.mkdir = cifs_mkdir,
1145 	.rmdir = cifs_rmdir,
1146 	.rename = cifs_rename2,
1147 	.permission = cifs_permission,
1148 	.setattr = cifs_setattr,
1149 	.symlink = cifs_symlink,
1150 	.mknod   = cifs_mknod,
1151 	.listxattr = cifs_listxattr,
1152 	.get_acl = cifs_get_acl,
1153 	.set_acl = cifs_set_acl,
1154 };
1155 
1156 const struct inode_operations cifs_file_inode_ops = {
1157 	.setattr = cifs_setattr,
1158 	.getattr = cifs_getattr,
1159 	.permission = cifs_permission,
1160 	.listxattr = cifs_listxattr,
1161 	.fiemap = cifs_fiemap,
1162 	.get_acl = cifs_get_acl,
1163 	.set_acl = cifs_set_acl,
1164 };
1165 
1166 const char *cifs_get_link(struct dentry *dentry, struct inode *inode,
1167 			    struct delayed_call *done)
1168 {
1169 	char *target_path;
1170 
1171 	if (!dentry)
1172 		return ERR_PTR(-ECHILD);
1173 
1174 	target_path = kmalloc(PATH_MAX, GFP_KERNEL);
1175 	if (!target_path)
1176 		return ERR_PTR(-ENOMEM);
1177 
1178 	spin_lock(&inode->i_lock);
1179 	if (likely(CIFS_I(inode)->symlink_target)) {
1180 		strscpy(target_path, CIFS_I(inode)->symlink_target, PATH_MAX);
1181 	} else {
1182 		kfree(target_path);
1183 		target_path = ERR_PTR(-EOPNOTSUPP);
1184 	}
1185 	spin_unlock(&inode->i_lock);
1186 
1187 	if (!IS_ERR(target_path))
1188 		set_delayed_call(done, kfree_link, target_path);
1189 
1190 	return target_path;
1191 }
1192 
1193 const struct inode_operations cifs_symlink_inode_ops = {
1194 	.get_link = cifs_get_link,
1195 	.setattr = cifs_setattr,
1196 	.permission = cifs_permission,
1197 	.listxattr = cifs_listxattr,
1198 };
1199 
1200 /*
1201  * Advance the EOF marker to after the source range.
1202  */
1203 static int cifs_precopy_set_eof(struct inode *src_inode, struct cifsInodeInfo *src_cifsi,
1204 				struct cifs_tcon *src_tcon,
1205 				unsigned int xid, loff_t src_end)
1206 {
1207 	struct cifsFileInfo *writeable_srcfile;
1208 	int rc = -EINVAL;
1209 
1210 	writeable_srcfile = find_writable_file(src_cifsi, FIND_WR_FSUID_ONLY);
1211 	if (writeable_srcfile) {
1212 		if (src_tcon->ses->server->ops->set_file_size)
1213 			rc = src_tcon->ses->server->ops->set_file_size(
1214 				xid, src_tcon, writeable_srcfile,
1215 				src_inode->i_size, true /* no need to set sparse */);
1216 		else
1217 			rc = -ENOSYS;
1218 		cifsFileInfo_put(writeable_srcfile);
1219 		cifs_dbg(FYI, "SetFSize for copychunk rc = %d\n", rc);
1220 	}
1221 
1222 	if (rc < 0)
1223 		goto set_failed;
1224 
1225 	netfs_resize_file(&src_cifsi->netfs, src_end, true);
1226 	fscache_resize_cookie(cifs_inode_cookie(src_inode), src_end);
1227 	return 0;
1228 
1229 set_failed:
1230 	return filemap_write_and_wait(src_inode->i_mapping);
1231 }
1232 
1233 /*
1234  * Flush out either the folio that overlaps the beginning of a range in which
1235  * pos resides or the folio that overlaps the end of a range unless that folio
1236  * is entirely within the range we're going to invalidate.  We extend the flush
1237  * bounds to encompass the folio.
1238  */
1239 static int cifs_flush_folio(struct inode *inode, loff_t pos, loff_t *_fstart, loff_t *_fend,
1240 			    bool first)
1241 {
1242 	struct folio *folio;
1243 	unsigned long long fpos, fend;
1244 	pgoff_t index = pos / PAGE_SIZE;
1245 	size_t size;
1246 	int rc = 0;
1247 
1248 	folio = filemap_get_folio(inode->i_mapping, index);
1249 	if (IS_ERR(folio))
1250 		return 0;
1251 
1252 	size = folio_size(folio);
1253 	fpos = folio_pos(folio);
1254 	fend = fpos + size - 1;
1255 	*_fstart = min_t(unsigned long long, *_fstart, fpos);
1256 	*_fend   = max_t(unsigned long long, *_fend, fend);
1257 	if ((first && pos == fpos) || (!first && pos == fend))
1258 		goto out;
1259 
1260 	rc = filemap_write_and_wait_range(inode->i_mapping, fpos, fend);
1261 out:
1262 	folio_put(folio);
1263 	return rc;
1264 }
1265 
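/*
 * reflink/clone implementation: flush the source range, extend the source
 * EOF on the server if needed, flush and discard the affected destination
 * folios, then ask the server to duplicate extents into the target file.
 */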
1266 static loff_t cifs_remap_file_range(struct file *src_file, loff_t off,
1267 		struct file *dst_file, loff_t destoff, loff_t len,
1268 		unsigned int remap_flags)
1269 {
1270 	struct inode *src_inode = file_inode(src_file);
1271 	struct inode *target_inode = file_inode(dst_file);
1272 	struct cifsInodeInfo *src_cifsi = CIFS_I(src_inode);
1273 	struct cifsInodeInfo *target_cifsi = CIFS_I(target_inode);
1274 	struct cifsFileInfo *smb_file_src = src_file->private_data;
1275 	struct cifsFileInfo *smb_file_target = dst_file->private_data;
1276 	struct cifs_tcon *target_tcon, *src_tcon;
1277 	unsigned long long destend, fstart, fend, old_size, new_size;
1278 	unsigned int xid;
1279 	int rc;
1280 
1281 	if (remap_flags & REMAP_FILE_DEDUP)
1282 		return -EOPNOTSUPP;
1283 	if (remap_flags & ~REMAP_FILE_ADVISORY)
1284 		return -EINVAL;
1285 
1286 	cifs_dbg(FYI, "clone range\n");
1287 
1288 	xid = get_xid();
1289 
1290 	if (!smb_file_src || !smb_file_target) {
1291 		rc = -EBADF;
1292 		cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
1293 		goto out;
1294 	}
1295 
1296 	src_tcon = tlink_tcon(smb_file_src->tlink);
1297 	target_tcon = tlink_tcon(smb_file_target->tlink);
1298 
1299 	/*
1300 	 * Note: the cifs case is easier than btrfs since the server is
1301 	 * responsible for checking for proper open modes and file type, and
1302 	 * if it wants to, the server could even support copying a range where source == target
1303 	 */
1304 	lock_two_nondirectories(target_inode, src_inode);
1305 
1306 	if (len == 0)
1307 		len = src_inode->i_size - off;
1308 
1309 	cifs_dbg(FYI, "clone range\n");
1310 
1311 	/* Flush the source buffer */
1312 	rc = filemap_write_and_wait_range(src_inode->i_mapping, off,
1313 					  off + len - 1);
1314 	if (rc)
1315 		goto unlock;
1316 
1317 	/* The server-side copy will fail if the source crosses the EOF marker.
1318 	 * Advance the EOF marker after the flush above to the end of the range
1319 	 * if it's short of that.
1320 	 */
1321 	if (src_cifsi->netfs.remote_i_size < off + len) {
1322 		rc = cifs_precopy_set_eof(src_inode, src_cifsi, src_tcon, xid, off + len);
1323 		if (rc < 0)
1324 			goto unlock;
1325 	}
1326 
1327 	new_size = destoff + len;
1328 	destend = destoff + len - 1;
1329 
1330 	/* Flush the folios at either end of the destination range to prevent
1331 	 * accidental loss of dirty data outside of the range.
1332 	 */
1333 	fstart = destoff;
1334 	fend = destend;
1335 
1336 	rc = cifs_flush_folio(target_inode, destoff, &fstart, &fend, true);
1337 	if (rc)
1338 		goto unlock;
1339 	rc = cifs_flush_folio(target_inode, destend, &fstart, &fend, false);
1340 	if (rc)
1341 		goto unlock;
1342 	if (fend > target_cifsi->netfs.zero_point)
1343 		target_cifsi->netfs.zero_point = fend + 1;
1344 	old_size = target_cifsi->netfs.remote_i_size;
1345 
1346 	/* Discard all the folios that overlap the destination region. */
1347 	cifs_dbg(FYI, "about to discard pages %llx-%llx\n", fstart, fend);
1348 	truncate_inode_pages_range(&target_inode->i_data, fstart, fend);
1349 
1350 	fscache_invalidate(cifs_inode_cookie(target_inode), NULL,
1351 			   i_size_read(target_inode), 0);
1352 
1353 	rc = -EOPNOTSUPP;
1354 	if (target_tcon->ses->server->ops->duplicate_extents) {
1355 		rc = target_tcon->ses->server->ops->duplicate_extents(xid,
1356 			smb_file_src, smb_file_target, off, len, destoff);
1357 		if (rc == 0 && new_size > old_size) {
1358 			truncate_setsize(target_inode, new_size);
1359 			fscache_resize_cookie(cifs_inode_cookie(target_inode),
1360 					      new_size);
1361 		}
1362 		if (rc == 0 && new_size > target_cifsi->netfs.zero_point)
1363 			target_cifsi->netfs.zero_point = new_size;
1364 	}
1365 
1366 	/* force revalidate of size and timestamps of target file now
1367 	   that target is updated on the server */
1368 	CIFS_I(target_inode)->time = 0;
1369 unlock:
1370 	/* although unlocking in the reverse order from locking is not
1371 	   strictly necessary here, it is a little cleaner to be consistent */
1372 	unlock_two_nondirectories(src_inode, target_inode);
1373 out:
1374 	free_xid(xid);
1375 	return rc < 0 ? rc : len;
1376 }
1377 
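/*
 * Server-side copy via copychunk: both files must be on the same SMB session;
 * the source is flushed (and its EOF extended if needed), the destination
 * range is invalidated, and the dialect copychunk_range op performs the copy.
 */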
1378 ssize_t cifs_file_copychunk_range(unsigned int xid,
1379 				struct file *src_file, loff_t off,
1380 				struct file *dst_file, loff_t destoff,
1381 				size_t len, unsigned int flags)
1382 {
1383 	struct inode *src_inode = file_inode(src_file);
1384 	struct inode *target_inode = file_inode(dst_file);
1385 	struct cifsInodeInfo *src_cifsi = CIFS_I(src_inode);
1386 	struct cifsInodeInfo *target_cifsi = CIFS_I(target_inode);
1387 	struct cifsFileInfo *smb_file_src;
1388 	struct cifsFileInfo *smb_file_target;
1389 	struct cifs_tcon *src_tcon;
1390 	struct cifs_tcon *target_tcon;
1391 	ssize_t rc;
1392 
1393 	cifs_dbg(FYI, "copychunk range\n");
1394 
1395 	if (!src_file->private_data || !dst_file->private_data) {
1396 		rc = -EBADF;
1397 		cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
1398 		goto out;
1399 	}
1400 
1401 	rc = -EXDEV;
1402 	smb_file_target = dst_file->private_data;
1403 	smb_file_src = src_file->private_data;
1404 	src_tcon = tlink_tcon(smb_file_src->tlink);
1405 	target_tcon = tlink_tcon(smb_file_target->tlink);
1406 
1407 	if (src_tcon->ses != target_tcon->ses) {
1408 		cifs_dbg(FYI, "source and target of copy not on same server\n");
1409 		goto out;
1410 	}
1411 
1412 	rc = -EOPNOTSUPP;
1413 	if (!target_tcon->ses->server->ops->copychunk_range)
1414 		goto out;
1415 
1416 	/*
1417 	 * Note: the cifs case is easier than btrfs since the server is
1418 	 * responsible for checking for proper open modes and file type, and
1419 	 * if it wants to, the server could even support copying a range where source == target
1420 	 */
1421 	lock_two_nondirectories(target_inode, src_inode);
1422 
1423 	cifs_dbg(FYI, "about to flush pages\n");
1424 
1425 	rc = filemap_write_and_wait_range(src_inode->i_mapping, off,
1426 					  off + len - 1);
1427 	if (rc)
1428 		goto unlock;
1429 
1430 	/* The server-side copy will fail if the source crosses the EOF marker.
1431 	 * Advance the EOF marker after the flush above to the end of the range
1432 	 * if it's short of that.
1433 	 */
1434 	if (src_cifsi->netfs.remote_i_size < off + len) {
1435 		rc = cifs_precopy_set_eof(src_inode, src_cifsi, src_tcon, xid, off + len);
1436 		if (rc < 0)
1437 			goto unlock;
1438 	}
1439 
1440 	/* Flush and invalidate all the folios in the destination region.  If
1441 	 * the copy was successful, then some of the flush is extra overhead,
1442 	 * but we need to allow for the copy failing in some way (eg. ENOSPC).
1443 	 */
1444 	rc = filemap_invalidate_inode(target_inode, true, destoff, destoff + len - 1);
1445 	if (rc)
1446 		goto unlock;
1447 
1448 	fscache_invalidate(cifs_inode_cookie(target_inode), NULL,
1449 			   i_size_read(target_inode), 0);
1450 
1451 	rc = file_modified(dst_file);
1452 	if (!rc) {
1453 		rc = target_tcon->ses->server->ops->copychunk_range(xid,
1454 			smb_file_src, smb_file_target, off, len, destoff);
1455 		if (rc > 0 && destoff + rc > i_size_read(target_inode)) {
1456 			truncate_setsize(target_inode, destoff + rc);
1457 			netfs_resize_file(&target_cifsi->netfs,
1458 					  i_size_read(target_inode), true);
1459 			fscache_resize_cookie(cifs_inode_cookie(target_inode),
1460 					      i_size_read(target_inode));
1461 		}
1462 		if (rc > 0 && destoff + rc > target_cifsi->netfs.zero_point)
1463 			target_cifsi->netfs.zero_point = destoff + rc;
1464 	}
1465 
1466 	file_accessed(src_file);
1467 
1468 	/* force revalidate of size and timestamps of target file now
1469 	 * that target is updated on the server
1470 	 */
1471 	CIFS_I(target_inode)->time = 0;
1472 
1473 unlock:
1474 	/* although unlocking in the reverse order from locking is not
1475 	 * strictly necessary here, it is a little cleaner to be consistent
1476 	 */
1477 	unlock_two_nondirectories(src_inode, target_inode);
1478 
1479 out:
1480 	return rc;
1481 }
1482 
1483 /*
1484  * Directory operations under CIFS/SMB2/SMB3 are synchronous, so fsync()
1485  * is a dummy operation.
1486  */
1487 static int cifs_dir_fsync(struct file *file, loff_t start, loff_t end, int datasync)
1488 {
1489 	cifs_dbg(FYI, "Sync directory - name: %pD datasync: 0x%x\n",
1490 		 file, datasync);
1491 
1492 	return 0;
1493 }
1494 
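/*
 * copy_file_range entry point: try the server-side copychunk path first and
 * fall back to a generic splice copy if the server or topology cannot do it.
 */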
1495 static ssize_t cifs_copy_file_range(struct file *src_file, loff_t off,
1496 				struct file *dst_file, loff_t destoff,
1497 				size_t len, unsigned int flags)
1498 {
1499 	unsigned int xid = get_xid();
1500 	ssize_t rc;
1501 	struct cifsFileInfo *cfile = dst_file->private_data;
1502 
1503 	if (cfile->swapfile) {
1504 		rc = -EOPNOTSUPP;
1505 		free_xid(xid);
1506 		return rc;
1507 	}
1508 
1509 	rc = cifs_file_copychunk_range(xid, src_file, off, dst_file, destoff,
1510 					len, flags);
1511 	free_xid(xid);
1512 
1513 	if (rc == -EOPNOTSUPP || rc == -EXDEV)
1514 		rc = splice_copy_file_range(src_file, off, dst_file,
1515 					    destoff, len);
1516 	return rc;
1517 }
1518 
1519 const struct file_operations cifs_file_ops = {
1520 	.read_iter = cifs_loose_read_iter,
1521 	.write_iter = cifs_file_write_iter,
1522 	.open = cifs_open,
1523 	.release = cifs_close,
1524 	.lock = cifs_lock,
1525 	.flock = cifs_flock,
1526 	.fsync = cifs_fsync,
1527 	.flush = cifs_flush,
1528 	.mmap  = cifs_file_mmap,
1529 	.splice_read = filemap_splice_read,
1530 	.splice_write = iter_file_splice_write,
1531 	.llseek = cifs_llseek,
1532 	.unlocked_ioctl	= cifs_ioctl,
1533 	.copy_file_range = cifs_copy_file_range,
1534 	.remap_file_range = cifs_remap_file_range,
1535 	.setlease = cifs_setlease,
1536 	.fallocate = cifs_fallocate,
1537 };
1538 
1539 const struct file_operations cifs_file_strict_ops = {
1540 	.read_iter = cifs_strict_readv,
1541 	.write_iter = cifs_strict_writev,
1542 	.open = cifs_open,
1543 	.release = cifs_close,
1544 	.lock = cifs_lock,
1545 	.flock = cifs_flock,
1546 	.fsync = cifs_strict_fsync,
1547 	.flush = cifs_flush,
1548 	.mmap = cifs_file_strict_mmap,
1549 	.splice_read = filemap_splice_read,
1550 	.splice_write = iter_file_splice_write,
1551 	.llseek = cifs_llseek,
1552 	.unlocked_ioctl	= cifs_ioctl,
1553 	.copy_file_range = cifs_copy_file_range,
1554 	.remap_file_range = cifs_remap_file_range,
1555 	.setlease = cifs_setlease,
1556 	.fallocate = cifs_fallocate,
1557 };
1558 
1559 const struct file_operations cifs_file_direct_ops = {
1560 	.read_iter = netfs_unbuffered_read_iter,
1561 	.write_iter = netfs_file_write_iter,
1562 	.open = cifs_open,
1563 	.release = cifs_close,
1564 	.lock = cifs_lock,
1565 	.flock = cifs_flock,
1566 	.fsync = cifs_fsync,
1567 	.flush = cifs_flush,
1568 	.mmap = cifs_file_mmap,
1569 	.splice_read = copy_splice_read,
1570 	.splice_write = iter_file_splice_write,
1571 	.unlocked_ioctl  = cifs_ioctl,
1572 	.copy_file_range = cifs_copy_file_range,
1573 	.remap_file_range = cifs_remap_file_range,
1574 	.llseek = cifs_llseek,
1575 	.setlease = cifs_setlease,
1576 	.fallocate = cifs_fallocate,
1577 };
1578 
1579 const struct file_operations cifs_file_nobrl_ops = {
1580 	.read_iter = cifs_loose_read_iter,
1581 	.write_iter = cifs_file_write_iter,
1582 	.open = cifs_open,
1583 	.release = cifs_close,
1584 	.fsync = cifs_fsync,
1585 	.flush = cifs_flush,
1586 	.mmap  = cifs_file_mmap,
1587 	.splice_read = filemap_splice_read,
1588 	.splice_write = iter_file_splice_write,
1589 	.llseek = cifs_llseek,
1590 	.unlocked_ioctl	= cifs_ioctl,
1591 	.copy_file_range = cifs_copy_file_range,
1592 	.remap_file_range = cifs_remap_file_range,
1593 	.setlease = cifs_setlease,
1594 	.fallocate = cifs_fallocate,
1595 };
1596 
1597 const struct file_operations cifs_file_strict_nobrl_ops = {
1598 	.read_iter = cifs_strict_readv,
1599 	.write_iter = cifs_strict_writev,
1600 	.open = cifs_open,
1601 	.release = cifs_close,
1602 	.fsync = cifs_strict_fsync,
1603 	.flush = cifs_flush,
1604 	.mmap = cifs_file_strict_mmap,
1605 	.splice_read = filemap_splice_read,
1606 	.splice_write = iter_file_splice_write,
1607 	.llseek = cifs_llseek,
1608 	.unlocked_ioctl	= cifs_ioctl,
1609 	.copy_file_range = cifs_copy_file_range,
1610 	.remap_file_range = cifs_remap_file_range,
1611 	.setlease = cifs_setlease,
1612 	.fallocate = cifs_fallocate,
1613 };
1614 
1615 const struct file_operations cifs_file_direct_nobrl_ops = {
1616 	.read_iter = netfs_unbuffered_read_iter,
1617 	.write_iter = netfs_file_write_iter,
1618 	.open = cifs_open,
1619 	.release = cifs_close,
1620 	.fsync = cifs_fsync,
1621 	.flush = cifs_flush,
1622 	.mmap = cifs_file_mmap,
1623 	.splice_read = copy_splice_read,
1624 	.splice_write = iter_file_splice_write,
1625 	.unlocked_ioctl  = cifs_ioctl,
1626 	.copy_file_range = cifs_copy_file_range,
1627 	.remap_file_range = cifs_remap_file_range,
1628 	.llseek = cifs_llseek,
1629 	.setlease = cifs_setlease,
1630 	.fallocate = cifs_fallocate,
1631 };
1632 
1633 const struct file_operations cifs_dir_ops = {
1634 	.iterate_shared = cifs_readdir,
1635 	.release = cifs_closedir,
1636 	.read    = generic_read_dir,
1637 	.unlocked_ioctl  = cifs_ioctl,
1638 	.copy_file_range = cifs_copy_file_range,
1639 	.remap_file_range = cifs_remap_file_range,
1640 	.llseek = generic_file_llseek,
1641 	.fsync = cifs_dir_fsync,
1642 };
1643 
1644 static void
1645 cifs_init_once(void *inode)
1646 {
1647 	struct cifsInodeInfo *cifsi = inode;
1648 
1649 	inode_init_once(&cifsi->netfs.inode);
1650 	init_rwsem(&cifsi->lock_sem);
1651 }
1652 
1653 static int __init
1654 cifs_init_inodecache(void)
1655 {
1656 	cifs_inode_cachep = kmem_cache_create("cifs_inode_cache",
1657 					      sizeof(struct cifsInodeInfo),
1658 					      0, (SLAB_RECLAIM_ACCOUNT|
1659 						SLAB_ACCOUNT),
1660 					      cifs_init_once);
1661 	if (cifs_inode_cachep == NULL)
1662 		return -ENOMEM;
1663 
1664 	return 0;
1665 }
1666 
1667 static void
1668 cifs_destroy_inodecache(void)
1669 {
1670 	/*
1671 	 * Make sure all delayed rcu free inodes are flushed before we
1672 	 * destroy cache.
1673 	 */
1674 	rcu_barrier();
1675 	kmem_cache_destroy(cifs_inode_cachep);
1676 }
1677 
1678 static int
1679 cifs_init_request_bufs(void)
1680 {
1681 	/*
1682 	 * The SMB2 maximum header size is bigger than the CIFS one, so it
1683 	 * does no harm to allocate a few more bytes for CIFS requests.
1684 	 */
1685 	size_t max_hdr_size = MAX_SMB2_HDR_SIZE;
1686 
1687 	if (CIFSMaxBufSize < 8192) {
1688 		/* The buffer size cannot be smaller than 2 * PATH_MAX since the longest
1689 		   Unicode path name has to fit in any SMB/CIFS path-based frame */
1690 		CIFSMaxBufSize = 8192;
1691 	} else if (CIFSMaxBufSize > 1024*127) {
1692 		CIFSMaxBufSize = 1024 * 127;
1693 	} else {
1694 		CIFSMaxBufSize &= 0x1FE00; /* round size down to a multiple of 512 bytes */
1695 	}
1696 /*
1697 	cifs_dbg(VFS, "CIFSMaxBufSize %d 0x%x\n",
1698 		 CIFSMaxBufSize, CIFSMaxBufSize);
1699 */
1700 	cifs_req_cachep = kmem_cache_create_usercopy("cifs_request",
1701 					    CIFSMaxBufSize + max_hdr_size, 0,
1702 					    SLAB_HWCACHE_ALIGN, 0,
1703 					    CIFSMaxBufSize + max_hdr_size,
1704 					    NULL);
1705 	if (cifs_req_cachep == NULL)
1706 		return -ENOMEM;
1707 
1708 	if (cifs_min_rcv < 1)
1709 		cifs_min_rcv = 1;
1710 	else if (cifs_min_rcv > 64) {
1711 		cifs_min_rcv = 64;
1712 		cifs_dbg(VFS, "cifs_min_rcv set to maximum (64)\n");
1713 	}
1714 
1715 	cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv,
1716 						  cifs_req_cachep);
1717 
1718 	if (cifs_req_poolp == NULL) {
1719 		kmem_cache_destroy(cifs_req_cachep);
1720 		return -ENOMEM;
1721 	}
1722 	/* MAX_CIFS_SMALL_BUFFER_SIZE bytes is enough for most SMB responses and
1723 	almost all handle-based requests (but not for write responses, nor is it
1724 	sufficient for path-based requests).  A smaller size would have been
1725 	more efficient (compacting multiple slab items onto one 4k page) when
1726 	debugging is on, but this larger size allows more SMBs to use the small
1727 	buffer allocator, and allocating one buffer per page off the slab is still
1728 	much more efficient than the 17K (5 page) allocation of large cifs
1729 	buffers even when page debugging is on */
	cifs_sm_req_cachep = kmem_cache_create_usercopy("cifs_small_rq",
			MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
			0, MAX_CIFS_SMALL_BUFFER_SIZE, NULL);
	if (cifs_sm_req_cachep == NULL) {
		mempool_destroy(cifs_req_poolp);
		kmem_cache_destroy(cifs_req_cachep);
		return -ENOMEM;
	}

	if (cifs_min_small < 2)
		cifs_min_small = 2;
	else if (cifs_min_small > 256) {
		cifs_min_small = 256;
		cifs_dbg(FYI, "cifs_min_small set to maximum (256)\n");
	}

	cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small,
						     cifs_sm_req_cachep);

	if (cifs_sm_req_poolp == NULL) {
		mempool_destroy(cifs_req_poolp);
		kmem_cache_destroy(cifs_req_cachep);
		kmem_cache_destroy(cifs_sm_req_cachep);
		return -ENOMEM;
	}

	return 0;
}

static void
cifs_destroy_request_bufs(void)
{
	mempool_destroy(cifs_req_poolp);
	kmem_cache_destroy(cifs_req_cachep);
	mempool_destroy(cifs_sm_req_poolp);
	kmem_cache_destroy(cifs_sm_req_cachep);
}

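/*
 * Create the cache and mempool for mid_q_entry structures, which track
 * each in-flight request by its multiplex/message id until the matching
 * response arrives.
 */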
static int init_mids(void)
{
	cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids",
					    sizeof(struct mid_q_entry), 0,
					    SLAB_HWCACHE_ALIGN, NULL);
	if (cifs_mid_cachep == NULL)
		return -ENOMEM;

	/* 3 is a reasonable minimum number of simultaneous operations */
	cifs_mid_poolp = mempool_create_slab_pool(3, cifs_mid_cachep);
	if (cifs_mid_poolp == NULL) {
		kmem_cache_destroy(cifs_mid_cachep);
		return -ENOMEM;
	}

	return 0;
}

static void destroy_mids(void)
{
	mempool_destroy(cifs_mid_poolp);
	kmem_cache_destroy(cifs_mid_cachep);
}

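/*
 * Create the caches and mempools for the netfs I/O request and subrequest
 * structures used by the read and write paths.  The mempools reserve a
 * minimum number of objects so I/O can make progress under memory pressure.
 */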
static int cifs_init_netfs(void)
{
	cifs_io_request_cachep =
		kmem_cache_create("cifs_io_request",
				  sizeof(struct cifs_io_request), 0,
				  SLAB_HWCACHE_ALIGN, NULL);
	if (!cifs_io_request_cachep)
		goto nomem_req;

	if (mempool_init_slab_pool(&cifs_io_request_pool, 100, cifs_io_request_cachep) < 0)
		goto nomem_reqpool;

	cifs_io_subrequest_cachep =
		kmem_cache_create("cifs_io_subrequest",
				  sizeof(struct cifs_io_subrequest), 0,
				  SLAB_HWCACHE_ALIGN, NULL);
	if (!cifs_io_subrequest_cachep)
		goto nomem_subreq;

	if (mempool_init_slab_pool(&cifs_io_subrequest_pool, 100, cifs_io_subrequest_cachep) < 0)
		goto nomem_subreqpool;

	return 0;

nomem_subreqpool:
	kmem_cache_destroy(cifs_io_subrequest_cachep);
nomem_subreq:
	mempool_exit(&cifs_io_request_pool);
nomem_reqpool:
	kmem_cache_destroy(cifs_io_request_cachep);
nomem_req:
	return -ENOMEM;
}

static void cifs_destroy_netfs(void)
{
	mempool_exit(&cifs_io_subrequest_pool);
	kmem_cache_destroy(cifs_io_subrequest_cachep);
	mempool_exit(&cifs_io_request_pool);
	kmem_cache_destroy(cifs_io_request_cachep);
}

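/*
 * Module initialization: set up the procfs entries, global counters and
 * locks, the worker queues, the slab caches and mempools, the optional
 * upcall/DFS/netlink subsystems, and finally register the cifs and smb3
 * filesystem types.  Any failure unwinds the steps already completed via
 * the goto labels at the end of the function.
 */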
static int __init
init_cifs(void)
{
	int rc = 0;

	cifs_proc_init();
	INIT_LIST_HEAD(&cifs_tcp_ses_list);

	/* Initialize the global counters */
	atomic_set(&sesInfoAllocCount, 0);
	atomic_set(&tconInfoAllocCount, 0);
	atomic_set(&tcpSesNextId, 0);
	atomic_set(&tcpSesAllocCount, 0);
	atomic_set(&tcpSesReconnectCount, 0);
	atomic_set(&tconInfoReconnectCount, 0);

	atomic_set(&buf_alloc_count, 0);
	atomic_set(&small_buf_alloc_count, 0);
#ifdef CONFIG_CIFS_STATS2
	atomic_set(&total_buf_alloc_count, 0);
	atomic_set(&total_small_buf_alloc_count, 0);
	if (slow_rsp_threshold < 1)
		cifs_dbg(FYI, "slow_rsp_threshold msgs disabled\n");
	else if (slow_rsp_threshold > 32767)
		cifs_dbg(VFS,
		       "slow response threshold set higher than recommended (0 to 32767)\n");
#endif /* CONFIG_CIFS_STATS2 */

	atomic_set(&mid_count, 0);
	GlobalCurrentXid = 0;
	GlobalTotalActiveXid = 0;
	GlobalMaxActiveXid = 0;
	spin_lock_init(&cifs_tcp_ses_lock);
	spin_lock_init(&GlobalMid_Lock);

	cifs_lock_secret = get_random_u32();

	if (cifs_max_pending < 2) {
		cifs_max_pending = 2;
		cifs_dbg(FYI, "cifs_max_pending set to min of 2\n");
	} else if (cifs_max_pending > CIFS_MAX_REQ) {
		cifs_max_pending = CIFS_MAX_REQ;
		cifs_dbg(FYI, "cifs_max_pending set to max of %u\n",
			 CIFS_MAX_REQ);
	}

	/*
	 * Limit the maximum to about 18 hours; setting it to zero disables
	 * directory entry caching.
	 */
	if (dir_cache_timeout > 65000) {
		dir_cache_timeout = 65000;
		cifs_dbg(VFS, "dir_cache_timeout set to max of 65000 seconds\n");
	}

	cifsiod_wq = alloc_workqueue("cifsiod", WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
	if (!cifsiod_wq) {
		rc = -ENOMEM;
		goto out_clean_proc;
	}

	/*
	 * Consider setting max_active to min(num_of_cores - 1, 3) in the
	 * future so that we do not launch too many worker threads, but
	 * Documentation/core-api/workqueue.rst recommends leaving it at 0
	 * for now.
	 */

	/* WQ_UNBOUND allows decrypt tasks to run on any CPU */
	decrypt_wq = alloc_workqueue("smb3decryptd",
				     WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
	if (!decrypt_wq) {
		rc = -ENOMEM;
		goto out_destroy_cifsiod_wq;
	}

	fileinfo_put_wq = alloc_workqueue("cifsfileinfoput",
				     WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
	if (!fileinfo_put_wq) {
		rc = -ENOMEM;
		goto out_destroy_decrypt_wq;
	}

	cifsoplockd_wq = alloc_workqueue("cifsoplockd",
					 WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
	if (!cifsoplockd_wq) {
		rc = -ENOMEM;
		goto out_destroy_fileinfo_put_wq;
	}

	deferredclose_wq = alloc_workqueue("deferredclose",
					   WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
	if (!deferredclose_wq) {
		rc = -ENOMEM;
		goto out_destroy_cifsoplockd_wq;
	}

	serverclose_wq = alloc_workqueue("serverclose",
					   WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
	if (!serverclose_wq) {
		rc = -ENOMEM;
		goto out_destroy_deferredclose_wq;
	}

	cfid_put_wq = alloc_workqueue("cfid_put_wq",
				      WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
	if (!cfid_put_wq) {
		rc = -ENOMEM;
		goto out_destroy_serverclose_wq;
	}

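	/* From here on, any failure unwinds the earlier steps via the labels below */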
	rc = cifs_init_inodecache();
	if (rc)
		goto out_destroy_cfid_put_wq;

	rc = cifs_init_netfs();
	if (rc)
		goto out_destroy_inodecache;

	rc = init_mids();
	if (rc)
		goto out_destroy_netfs;

	rc = cifs_init_request_bufs();
	if (rc)
		goto out_destroy_mids;

#ifdef CONFIG_CIFS_DFS_UPCALL
	rc = dfs_cache_init();
	if (rc)
		goto out_destroy_request_bufs;
#endif /* CONFIG_CIFS_DFS_UPCALL */
#ifdef CONFIG_CIFS_UPCALL
	rc = init_cifs_spnego();
	if (rc)
		goto out_destroy_dfs_cache;
#endif /* CONFIG_CIFS_UPCALL */
#ifdef CONFIG_CIFS_SWN_UPCALL
	rc = cifs_genl_init();
	if (rc)
		goto out_register_key_type;
#endif /* CONFIG_CIFS_SWN_UPCALL */

	rc = init_cifs_idmap();
	if (rc)
		goto out_cifs_swn_init;

	rc = register_filesystem(&cifs_fs_type);
	if (rc)
		goto out_init_cifs_idmap;

	rc = register_filesystem(&smb3_fs_type);
	if (rc) {
		unregister_filesystem(&cifs_fs_type);
		goto out_init_cifs_idmap;
	}

	return 0;

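	/*
	 * Error unwind: each label undoes one init step and falls through to
	 * the labels below it, in reverse order of initialization.
	 */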
out_init_cifs_idmap:
	exit_cifs_idmap();
out_cifs_swn_init:
#ifdef CONFIG_CIFS_SWN_UPCALL
	cifs_genl_exit();
out_register_key_type:
#endif
#ifdef CONFIG_CIFS_UPCALL
	exit_cifs_spnego();
out_destroy_dfs_cache:
#endif
#ifdef CONFIG_CIFS_DFS_UPCALL
	dfs_cache_destroy();
out_destroy_request_bufs:
#endif
	cifs_destroy_request_bufs();
out_destroy_mids:
	destroy_mids();
out_destroy_netfs:
	cifs_destroy_netfs();
out_destroy_inodecache:
	cifs_destroy_inodecache();
out_destroy_cfid_put_wq:
	destroy_workqueue(cfid_put_wq);
out_destroy_serverclose_wq:
	destroy_workqueue(serverclose_wq);
out_destroy_deferredclose_wq:
	destroy_workqueue(deferredclose_wq);
out_destroy_cifsoplockd_wq:
	destroy_workqueue(cifsoplockd_wq);
out_destroy_fileinfo_put_wq:
	destroy_workqueue(fileinfo_put_wq);
out_destroy_decrypt_wq:
	destroy_workqueue(decrypt_wq);
out_destroy_cifsiod_wq:
	destroy_workqueue(cifsiod_wq);
out_clean_proc:
	cifs_proc_clean();
	return rc;
}

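/*
 * Module unload: unregister both filesystem types, shut down the optional
 * upcall/DFS/netlink subsystems, then free the caches, mempools and
 * workqueues in roughly the reverse order of init_cifs().
 */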
static void __exit
exit_cifs(void)
{
	cifs_dbg(NOISY, "exit_smb3\n");
	unregister_filesystem(&cifs_fs_type);
	unregister_filesystem(&smb3_fs_type);
	cifs_release_automount_timer();
	exit_cifs_idmap();
#ifdef CONFIG_CIFS_SWN_UPCALL
	cifs_genl_exit();
#endif
#ifdef CONFIG_CIFS_UPCALL
	exit_cifs_spnego();
#endif
#ifdef CONFIG_CIFS_DFS_UPCALL
	dfs_cache_destroy();
#endif
	cifs_destroy_request_bufs();
	destroy_mids();
	cifs_destroy_netfs();
	cifs_destroy_inodecache();
	destroy_workqueue(deferredclose_wq);
	destroy_workqueue(cifsoplockd_wq);
	destroy_workqueue(decrypt_wq);
	destroy_workqueue(fileinfo_put_wq);
	destroy_workqueue(serverclose_wq);
	destroy_workqueue(cfid_put_wq);
	destroy_workqueue(cifsiod_wq);
	cifs_proc_clean();
}


MODULE_AUTHOR("Steve French");
MODULE_LICENSE("GPL");	/* combination of LGPL + GPL source behaves as GPL */
MODULE_DESCRIPTION
	("VFS to access SMB3 servers e.g. Samba, Macs, Azure and Windows (and "
	"also older servers complying with the SNIA CIFS Specification)");
MODULE_VERSION(CIFS_VERSION);
MODULE_SOFTDEP("ecb");
MODULE_SOFTDEP("hmac");
MODULE_SOFTDEP("md5");
MODULE_SOFTDEP("nls");
MODULE_SOFTDEP("aes");
MODULE_SOFTDEP("cmac");
MODULE_SOFTDEP("sha256");
MODULE_SOFTDEP("sha512");
MODULE_SOFTDEP("aead2");
MODULE_SOFTDEP("ccm");
MODULE_SOFTDEP("gcm");
module_init(init_cifs)
module_exit(exit_cifs)