4 * Copyright (C) International Business Machines Corp., 2002,2008
5 * Author(s): Steve French (sfrench@us.ibm.com)
7 * Common Internet FileSystem (CIFS) client
9 * This library is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU Lesser General Public License as published
11 * by the Free Software Foundation; either version 2.1 of the License, or
12 * (at your option) any later version.
14 * This library is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
17 * the GNU Lesser General Public License for more details.
19 * You should have received a copy of the GNU Lesser General Public License
20 * along with this library; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 /* Note that BB means BUGBUG (ie something to fix eventually) */
26 #include <linux/module.h>
28 #include <linux/mount.h>
29 #include <linux/slab.h>
30 #include <linux/init.h>
31 #include <linux/list.h>
32 #include <linux/seq_file.h>
33 #include <linux/vfs.h>
34 #include <linux/mempool.h>
35 #include <linux/delay.h>
36 #include <linux/kthread.h>
37 #include <linux/freezer.h>
38 #include <linux/namei.h>
39 #include <linux/random.h>
40 #include <linux/uuid.h>
41 #include <linux/xattr.h>
45 #define DECLARE_GLOBALS_HERE
47 #include "cifsproto.h"
48 #include "cifs_debug.h"
49 #include "cifs_fs_sb.h"
51 #include <linux/key-type.h>
52 #include "cifs_spnego.h"
55 #ifdef CONFIG_CIFS_DFS_UPCALL
56 #include "dfs_cache.h"
60 * DOS dates from 1980/1/1 through 2107/12/31
61 * Protocol specifications indicate the range should be to 119, which
62 * limits maximum year to 2099. But this range has not been checked.
/* DOS date packing: bits 9-15 = years since 1980, bits 5-8 = month, bits 0-4 = day */
64 #define SMB_DATE_MAX (127<<9 | 12<<5 | 31)
65 #define SMB_DATE_MIN (0<<9 | 1<<5 | 1)
/* DOS time packing: bits 11-15 = hours, bits 5-10 = minutes, bits 0-4 = seconds/2
 * (29 here therefore means 58 seconds — 2 second granularity) */
66 #define SMB_TIME_MAX (23<<11 | 59<<5 | 29)
/*
 * Module-wide tunables and shared state for the CIFS/SMB client.
 * Several of these are exposed as module parameters (see the
 * module_param()/MODULE_PARM_DESC() pairs below); the rest are
 * globals shared across the cifs translation units.
 * NOTE(review): some continuation lines of the MODULE_PARM_DESC()
 * strings appear to be missing from this extract.
 */
70 bool enable_oplocks = true;
71 bool linuxExtEnabled = true;
72 bool lookupCacheEnabled = true;
73 bool disable_legacy_dialects; /* false by default */
74 unsigned int global_secflags = CIFSSEC_DEF;
75 /* unsigned int ntlmv2_support = 0; */
76 unsigned int sign_CIFS_PDUs = 1;
/* forward declaration; table is defined later in this file */
77 static const struct super_operations cifs_super_ops;
78 unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
79 module_param(CIFSMaxBufSize, uint, 0444);
80 MODULE_PARM_DESC(CIFSMaxBufSize, "Network buffer size (not including header) "
82 "Default: 16384 Range: 8192 to 130048");
83 unsigned int cifs_min_rcv = CIFS_MIN_RCV_POOL;
84 module_param(cifs_min_rcv, uint, 0444);
85 MODULE_PARM_DESC(cifs_min_rcv, "Network buffers in pool. Default: 4 Range: "
87 unsigned int cifs_min_small = 30;
88 module_param(cifs_min_small, uint, 0444);
89 MODULE_PARM_DESC(cifs_min_small, "Small network buffers in pool. Default: 30 "
91 unsigned int cifs_max_pending = CIFS_MAX_REQ;
92 module_param(cifs_max_pending, uint, 0444);
93 MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server for "
94 "CIFS/SMB1 dialect (N/A for SMB3) "
95 "Default: 32767 Range: 2 to 32767.");
96 #ifdef CONFIG_CIFS_STATS2
97 unsigned int slow_rsp_threshold = 1;
98 module_param(slow_rsp_threshold, uint, 0644);
99 MODULE_PARM_DESC(slow_rsp_threshold, "Amount of time (in seconds) to wait "
100 "before logging that a response is delayed. "
101 "Default: 1 (if set to 0 disables msg).");
104 module_param(enable_oplocks, bool, 0644);
105 MODULE_PARM_DESC(enable_oplocks, "Enable or disable oplocks. Default: y/Y/1");
107 module_param(disable_legacy_dialects, bool, 0644);
108 MODULE_PARM_DESC(disable_legacy_dialects, "To improve security it may be "
109 "helpful to restrict the ability to "
110 "override the default dialects (SMB2.1, "
111 "SMB3 and SMB3.02) on mount with old "
112 "dialects (CIFS/SMB1 and SMB2) since "
113 "vers=1.0 (CIFS/SMB1) and vers=2.0 are weaker"
114 " and less secure. Default: n/N/0");
/* mempools defined later in this file; declared extern here for early use */
116 extern mempool_t *cifs_sm_req_poolp;
117 extern mempool_t *cifs_req_poolp;
118 extern mempool_t *cifs_mid_poolp;
/* shared workqueues used by the demultiplex/oplock/decrypt paths */
120 struct workqueue_struct *cifsiod_wq;
121 struct workqueue_struct *decrypt_wq;
122 struct workqueue_struct *cifsoplockd_wq;
/* random secret mixed into byte-range-lock hashing */
123 __u32 cifs_lock_secret;
126 * Bumps refcount for cifs super block.
127 * Note that it should be only called if a reference to VFS super block is
128 * already held, e.g. in open-type syscalls context. Otherwise it can race with
129 * atomic_dec_and_test in deactivate_locked_super.
132 cifs_sb_active(struct super_block *sb)
134 struct cifs_sb_info *server = CIFS_SB(sb);
136 if (atomic_inc_return(&server->active) == 1)
137 atomic_inc(&sb->s_active);
141 cifs_sb_deactive(struct super_block *sb)
143 struct cifs_sb_info *server = CIFS_SB(sb);
145 if (atomic_dec_and_test(&server->active))
146 deactivate_super(sb);
/*
 * Fill in a freshly allocated superblock at mount time: POSIX ACL and
 * read-only flags, timestamp granularity/range, size limits, the
 * super/dentry operations tables, and the root inode/dentry.
 * NOTE(review): several lines (braces, error handling such as
 * "if (rc) goto ..." after super_setup_bdi(), and the out_no_root
 * cleanup path) appear to be missing from this extract.
 */
150 cifs_read_super(struct super_block *sb)
153 struct cifs_sb_info *cifs_sb;
154 struct cifs_tcon *tcon;
155 struct timespec64 ts;
158 cifs_sb = CIFS_SB(sb);
159 tcon = cifs_sb_master_tcon(cifs_sb);
161 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIXACL)
162 sb->s_flags |= SB_POSIXACL;
/* mounts of a snapshot are implicitly read-only */
164 if (tcon->snapshot_time)
165 sb->s_flags |= SB_RDONLY;
167 if (tcon->ses->capabilities & tcon->ses->server->vals->cap_large_files)
168 sb->s_maxbytes = MAX_LFS_FILESIZE;
170 sb->s_maxbytes = MAX_NON_LFS;
172 /* Some very old servers like DOS and OS/2 used 2 second granularity */
173 if ((tcon->ses->server->vals->protocol_id == SMB10_PROT_ID) &&
174 ((tcon->ses->capabilities &
175 tcon->ses->server->vals->cap_nt_find) == 0) &&
177 sb->s_time_gran = 1000000000; /* 1 second is max allowed gran */
/* derive the representable timestamp range from the DOS date limits */
178 ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MIN), 0, 0);
179 sb->s_time_min = ts.tv_sec;
180 ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MAX),
181 cpu_to_le16(SMB_TIME_MAX), 0);
182 sb->s_time_max = ts.tv_sec;
185 * Almost every server, including all SMB2+, uses DCE TIME
186 * ie 100 nanosecond units, since 1601. See MS-DTYP and MS-FSCC
188 sb->s_time_gran = 100;
189 ts = cifs_NTtimeToUnix(0);
190 sb->s_time_min = ts.tv_sec;
191 ts = cifs_NTtimeToUnix(cpu_to_le64(S64_MAX));
192 sb->s_time_max = ts.tv_sec;
195 sb->s_magic = CIFS_MAGIC_NUMBER;
196 sb->s_op = &cifs_super_ops;
197 sb->s_xattr = cifs_xattr_handlers;
198 rc = super_setup_bdi(sb);
201 /* tune readahead according to rsize */
202 sb->s_bdi->ra_pages = cifs_sb->rsize / PAGE_SIZE;
204 sb->s_blocksize = CIFS_MAX_MSGSIZE;
205 sb->s_blocksize_bits = 14; /* default 2**14 = CIFS_MAX_MSGSIZE */
206 inode = cifs_root_iget(sb);
/* case-insensitive dentry ops when the share is not case sensitive */
214 sb->s_d_op = &cifs_ci_dentry_ops;
216 sb->s_d_op = &cifs_dentry_ops;
218 sb->s_root = d_make_root(inode);
224 #ifdef CONFIG_CIFS_NFSD_EXPORT
/* NFS export requires stable server inode numbers */
225 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
226 cifs_dbg(FYI, "export ops supported\n");
227 sb->s_export_op = &cifs_export_ops;
229 #endif /* CONFIG_CIFS_NFSD_EXPORT */
234 cifs_dbg(VFS, "%s: get root inode failed\n", __func__);
/*
 * Tear down a cifs superblock at unmount time and release the
 * cifs-level mount state.
 * NOTE(review): a kill_anon_super() call is expected in this path but
 * is not visible in this extract — confirm against the full file.
 */
238 static void cifs_kill_sb(struct super_block *sb)
240 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
242 cifs_umount(cifs_sb);
/*
 * statfs(2) handler: fill *buf with filesystem statistics. Name length
 * comes from the server-reported FS attribute info when available;
 * detailed block counts are delegated to the per-dialect queryfs op.
 * NOTE(review): the assignment target on the MaxPathNameComponentLength
 * branch (buf->f_namelen = ...) spans lines missing from this extract.
 */
246 cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
248 struct super_block *sb = dentry->d_sb;
249 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
250 struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
251 struct TCP_Server_Info *server = tcon->ses->server;
257 if (le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength) > 0)
259 le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength);
261 buf->f_namelen = PATH_MAX;
263 buf->f_fsid.val[0] = tcon->vol_serial_number;
264 /* we are using part of the create time for more randomness, see man statfs */
265 buf->f_fsid.val[1] = (int)le64_to_cpu(tcon->vol_create_time);
267 buf->f_files = 0; /* undefined */
268 buf->f_ffree = 0; /* unlimited */
/* dialect-specific query fills in block totals/free counts */
270 if (server->ops->queryfs)
271 rc = server->ops->queryfs(xid, tcon, buf);
/*
 * fallocate(2) handler: forward to the dialect-specific implementation
 * when the server ops table provides one.
 * NOTE(review): the fallback return (presumably -EOPNOTSUPP) is not
 * visible in this extract.
 */
277 static long cifs_fallocate(struct file *file, int mode, loff_t off, loff_t len)
279 struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
280 struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
281 struct TCP_Server_Info *server = tcon->ses->server;
283 if (server->ops->fallocate)
284 return server->ops->fallocate(file, tcon, mode, off, len);
/*
 * ->permission() handler. With the "noperm" mount option the client
 * skips mode-bit checks (the server enforces access), except that
 * execute permission is still verified locally.
 */
289 static int cifs_permission(struct inode *inode, int mask)
291 struct cifs_sb_info *cifs_sb;
293 cifs_sb = CIFS_SB(inode->i_sb);
295 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) {
296 if ((mask & MAY_EXEC) && !execute_ok(inode))
300 } else /* file mode might have been restricted at mount time
301 on the client (above and beyond ACL on servers) for
302 servers which do not support setting and viewing mode bits,
303 so allowing client to check permissions is useful */
304 return generic_permission(inode, mask);
/* Slab caches for per-inode data, request buffers and mid entries,
 * plus the mempools built on top of them (mempools guarantee forward
 * progress under memory pressure). */
307 static struct kmem_cache *cifs_inode_cachep;
308 static struct kmem_cache *cifs_req_cachep;
309 static struct kmem_cache *cifs_mid_cachep;
310 static struct kmem_cache *cifs_sm_req_cachep;
311 mempool_t *cifs_sm_req_poolp;
312 mempool_t *cifs_req_poolp;
313 mempool_t *cifs_mid_poolp;
/*
 * ->alloc_inode(): allocate and initialize a cifsInodeInfo from the
 * dedicated slab cache, returning the embedded VFS inode.
 * NOTE(review): the NULL check after kmem_cache_alloc() is not visible
 * in this extract — confirm against the full file.
 */
315 static struct inode *
316 cifs_alloc_inode(struct super_block *sb)
318 struct cifsInodeInfo *cifs_inode;
319 cifs_inode = kmem_cache_alloc(cifs_inode_cachep, GFP_KERNEL);
/* 0x20 = ATTR_ARCHIVE, a conventional default for new files */
322 cifs_inode->cifsAttrs = 0x20; /* default */
/* time == 0 marks cached attributes as stale/unvalidated */
323 cifs_inode->time = 0;
325 * Until the file is open and we have gotten oplock info back from the
326 * server, can not assume caching of file data or metadata.
328 cifs_set_oplock_level(cifs_inode, 0);
329 cifs_inode->flags = 0;
330 spin_lock_init(&cifs_inode->writers_lock);
331 cifs_inode->writers = 0;
332 cifs_inode->vfs_inode.i_blkbits = 14; /* 2**14 = CIFS_MAX_MSGSIZE */
333 cifs_inode->server_eof = 0;
334 cifs_inode->uniqueid = 0;
335 cifs_inode->createtime = 0;
336 cifs_inode->epoch = 0;
337 spin_lock_init(&cifs_inode->open_file_lock);
/* each inode gets its own lease key for SMB2+ leasing */
338 generate_random_uuid(cifs_inode->lease_key);
341 * Can not set i_flags here - they get immediately overwritten to zero
344 /* cifs_inode->vfs_inode.i_flags = S_NOATIME | S_NOCMTIME; */
345 INIT_LIST_HEAD(&cifs_inode->openFileList);
346 INIT_LIST_HEAD(&cifs_inode->llist);
347 return &cifs_inode->vfs_inode;
351 cifs_free_inode(struct inode *inode)
353 kmem_cache_free(cifs_inode_cachep, CIFS_I(inode));
/*
 * ->evict_inode(): drop all cached pages for the inode and release its
 * fscache cookie before the inode is destroyed.
 * NOTE(review): a clear_inode() call is expected between these steps
 * but is not visible in this extract.
 */
357 cifs_evict_inode(struct inode *inode)
359 truncate_inode_pages_final(&inode->i_data);
361 cifs_fscache_release_inode_cookie(inode);
/*
 * Emit the ",addr=" mount option showing the server's destination
 * address (IPv4, IPv6 with optional scope id, or "(unknown)").
 * NOTE(review): the switch case labels and break statements are not
 * visible in this extract.
 */
365 cifs_show_address(struct seq_file *s, struct TCP_Server_Info *server)
367 struct sockaddr_in *sa = (struct sockaddr_in *) &server->dstaddr;
368 struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *) &server->dstaddr;
370 seq_puts(s, ",addr=");
372 switch (server->dstaddr.ss_family) {
374 seq_printf(s, "%pI4", &sa->sin_addr.s_addr);
377 seq_printf(s, "%pI6", &sa6->sin6_addr.s6_addr);
/* append "%scope" for link-local IPv6 addresses */
378 if (sa6->sin6_scope_id)
379 seq_printf(s, "%%%u", sa6->sin6_scope_id);
382 seq_puts(s, "(unknown)");
/* flag SMB-Direct (RDMA) transports */
385 seq_puts(s, ",rdma");
/*
 * Emit the ",sec=" mount option reflecting the session's security type.
 * An Unspecified sectype with no user name means anonymous ("none").
 * NOTE(review): the switch case labels and break statements are not
 * visible in this extract.
 */
389 cifs_show_security(struct seq_file *s, struct cifs_ses *ses)
391 if (ses->sectype == Unspecified) {
392 if (ses->user_name == NULL)
393 seq_puts(s, ",sec=none");
397 seq_puts(s, ",sec=");
399 switch (ses->sectype) {
401 seq_puts(s, "lanman");
404 seq_puts(s, "ntlmv2");
/* kerberos also records which uid's credential cache was used */
410 seq_printf(s, "krb5,cruid=%u", from_kuid_munged(&init_user_ns,ses->cred_uid));
413 seq_puts(s, "ntlmssp");
416 /* shouldn't ever happen */
417 seq_puts(s, "unknown");
/*
 * Emit the ",cache=" mount option from the mount flags; "loose" is the
 * fallback when no stricter caching mode flag is set.
 */
426 cifs_show_cache_flavor(struct seq_file *s, struct cifs_sb_info *cifs_sb)
428 seq_puts(s, ",cache=");
430 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
431 seq_puts(s, "strict");
432 else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO)
434 else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RW_CACHE)
435 seq_puts(s, "singleclient"); /* assume only one client access */
436 else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RO_CACHE)
437 seq_puts(s, "ro"); /* read only caching assumed */
439 seq_puts(s, "loose");
/*
 * Emit ",iocharset=" only when the mount's charset differs from the
 * system default NLS table.
 * NOTE(review): the comparison against the default table is not
 * visible in this extract.
 */
443 cifs_show_nls(struct seq_file *s, struct nls_table *cur)
445 struct nls_table *def;
447 /* Display iocharset= option if it's not default charset */
448 def = load_nls_default();
450 seq_printf(s, ",iocharset=%s", cur->charset);
455 * cifs_show_options() is for displaying mount options in /proc/mounts.
456 * Not all settable options are displayed but most of the important
/*
 * ->show_options(): write one ",option[=value]" token per active mount
 * setting, in a form that mount(8) could parse back. Most tokens are
 * simple flag tests against cifs_sb->mnt_cifs_flags or tcon state.
 * NOTE(review): several condition lines and seq_puts() arguments
 * (e.g. for nolease, seal, nocase, unix emul, acl, fsc) appear to be
 * missing from this extract.
 */
460 cifs_show_options(struct seq_file *s, struct dentry *root)
462 struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
463 struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
464 struct sockaddr *srcaddr;
465 srcaddr = (struct sockaddr *)&tcon->ses->server->srcaddr;
467 seq_show_option(s, "vers", tcon->ses->server->vals->version_string);
468 cifs_show_security(s, tcon->ses);
469 cifs_show_cache_flavor(s, cifs_sb);
472 seq_puts(s, ",nolease");
473 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER)
474 seq_puts(s, ",multiuser");
475 else if (tcon->ses->user_name)
476 seq_show_option(s, "username", tcon->ses->user_name);
478 if (tcon->ses->domainName && tcon->ses->domainName[0] != 0)
479 seq_show_option(s, "domain", tcon->ses->domainName);
/* show the bound source address, if the mount pinned one */
481 if (srcaddr->sa_family != AF_UNSPEC) {
482 struct sockaddr_in *saddr4;
483 struct sockaddr_in6 *saddr6;
484 saddr4 = (struct sockaddr_in *)srcaddr;
485 saddr6 = (struct sockaddr_in6 *)srcaddr;
486 if (srcaddr->sa_family == AF_INET6)
487 seq_printf(s, ",srcaddr=%pI6c",
489 else if (srcaddr->sa_family == AF_INET)
490 seq_printf(s, ",srcaddr=%pI4",
491 &saddr4->sin_addr.s_addr);
493 seq_printf(s, ",srcaddr=BAD-AF:%i",
494 (int)(srcaddr->sa_family));
497 seq_printf(s, ",uid=%u",
498 from_kuid_munged(&init_user_ns, cifs_sb->mnt_uid));
499 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID)
500 seq_puts(s, ",forceuid");
502 seq_puts(s, ",noforceuid");
504 seq_printf(s, ",gid=%u",
505 from_kgid_munged(&init_user_ns, cifs_sb->mnt_gid));
506 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID)
507 seq_puts(s, ",forcegid");
509 seq_puts(s, ",noforcegid");
511 cifs_show_address(s, tcon->ses->server);
514 seq_printf(s, ",file_mode=0%ho,dir_mode=0%ho",
515 cifs_sb->mnt_file_mode,
516 cifs_sb->mnt_dir_mode);
518 cifs_show_nls(s, cifs_sb->local_nls);
521 seq_puts(s, ",seal");
523 seq_puts(s, ",nocase");
525 if (tcon->local_lease)
525 seq_puts(s, ",locallease");
527 seq_puts(s, ",hard");
529 seq_puts(s, ",soft");
530 if (tcon->use_persistent)
531 seq_puts(s, ",persistenthandles");
532 else if (tcon->use_resilient)
533 seq_puts(s, ",resilienthandles");
534 if (tcon->posix_extensions)
535 seq_puts(s, ",posix");
536 else if (tcon->unix_ext)
537 seq_puts(s, ",unix");
539 seq_puts(s, ",nounix");
540 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS)
541 seq_puts(s, ",nodfs");
542 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)
543 seq_puts(s, ",posixpaths");
544 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID)
545 seq_puts(s, ",setuids");
546 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UID_FROM_ACL)
547 seq_puts(s, ",idsfromsid");
548 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
549 seq_puts(s, ",serverino");
550 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
551 seq_puts(s, ",rwpidforward");
552 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL)
553 seq_puts(s, ",forcemand");
554 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
555 seq_puts(s, ",nouser_xattr");
556 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR)
557 seq_puts(s, ",mapchars");
558 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SFM_CHR)
559 seq_puts(s, ",mapposix");
560 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)
562 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
563 seq_puts(s, ",nobrl");
564 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_HANDLE_CACHE)
565 seq_puts(s, ",nohandlecache");
566 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID)
567 seq_puts(s, ",modefromsid");
568 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL)
569 seq_puts(s, ",cifsacl");
570 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)
571 seq_puts(s, ",dynperm");
572 if (root->d_sb->s_flags & SB_POSIXACL)
574 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS)
575 seq_puts(s, ",mfsymlinks");
576 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_FSCACHE)
578 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)
579 seq_puts(s, ",nostrictsync");
580 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM)
581 seq_puts(s, ",noperm");
582 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID)
583 seq_printf(s, ",backupuid=%u",
584 from_kuid_munged(&init_user_ns,
585 cifs_sb->mnt_backupuid));
586 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID)
587 seq_printf(s, ",backupgid=%u",
588 from_kgid_munged(&init_user_ns,
589 cifs_sb->mnt_backupgid));
591 seq_printf(s, ",rsize=%u", cifs_sb->rsize);
592 seq_printf(s, ",wsize=%u", cifs_sb->wsize);
593 seq_printf(s, ",bsize=%u", cifs_sb->bsize);
594 if (tcon->ses->server->min_offload)
595 seq_printf(s, ",esize=%u", tcon->ses->server->min_offload);
596 seq_printf(s, ",echo_interval=%lu",
597 tcon->ses->server->echo_interval / HZ);
599 /* Only display max_credits if it was overridden on mount */
600 if (tcon->ses->server->max_credits != SMB2_MAX_CREDITS_AVAILABLE)
601 seq_printf(s, ",max_credits=%u", tcon->ses->server->max_credits);
603 if (tcon->snapshot_time)
604 seq_printf(s, ",snapshot=%llu", tcon->snapshot_time);
605 if (tcon->handle_timeout)
606 seq_printf(s, ",handletimeout=%u", tcon->handle_timeout);
607 /* convert actimeo and display it in seconds */
608 seq_printf(s, ",actimeo=%lu", cifs_sb->actimeo / HZ);
/*
 * ->umount_begin(): force-umount support. If this is the only mount of
 * the tcon, mark it exiting under cifs_tcp_ses_lock, then wake all
 * waiters on the server's request/response queues so blocked calls can
 * notice and bail out.
 */
613 static void cifs_umount_begin(struct super_block *sb)
615 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
616 struct cifs_tcon *tcon;
621 tcon = cifs_sb_master_tcon(cifs_sb);
623 spin_lock(&cifs_tcp_ses_lock);
624 if ((tcon->tc_count > 1) || (tcon->tidStatus == CifsExiting)) {
625 /* we have other mounts to same share or we have
626 already tried to force umount this and woken up
627 all waiting network requests, nothing to do */
628 spin_unlock(&cifs_tcp_ses_lock);
630 } else if (tcon->tc_count == 1)
631 tcon->tidStatus = CifsExiting;
632 spin_unlock(&cifs_tcp_ses_lock);
634 /* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
635 /* cancel_notify_requests(tcon); */
636 if (tcon->ses && tcon->ses->server) {
637 cifs_dbg(FYI, "wake up tasks now - umount begin not complete\n");
638 wake_up_all(&tcon->ses->server->request_q);
639 wake_up_all(&tcon->ses->server->response_q);
640 msleep(1); /* yield */
641 /* we have to kick the requests once more */
642 wake_up_all(&tcon->ses->server->response_q);
/* ->show_stats() for /proc/fs statistics, compiled only with
 * CONFIG_CIFS_STATS2. NOTE(review): the function body is not visible
 * in this extract. */
649 #ifdef CONFIG_CIFS_STATS2
650 static int cifs_show_stats(struct seq_file *s, struct dentry *root)
/* ->remount_fs(): cifs always keeps directory atime updates disabled,
 * so force SB_NODIRATIME back on across remounts. */
657 static int cifs_remount(struct super_block *sb, int *flags, char *data)
660 *flags |= SB_NODIRATIME;
664 static int cifs_drop_inode(struct inode *inode)
666 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
668 /* no serverino => unconditional eviction */
669 return !(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) ||
670 generic_drop_inode(inode);
/* Superblock operations table wired into sb->s_op by cifs_read_super(). */
673 static const struct super_operations cifs_super_ops = {
674 .statfs = cifs_statfs,
675 .alloc_inode = cifs_alloc_inode,
676 .free_inode = cifs_free_inode,
677 .drop_inode = cifs_drop_inode,
678 .evict_inode = cifs_evict_inode,
679 /* .delete_inode = cifs_delete_inode, */ /* Do not need above
680 function unless later we add lazy close of inodes or unless the
681 kernel forgets to call us with the same number of releases (closes)
683 .show_options = cifs_show_options,
684 .umount_begin = cifs_umount_begin,
685 .remount_fs = cifs_remount,
686 #ifdef CONFIG_CIFS_STATS2
687 .show_stats = cifs_show_stats,
692 * Get root dentry from superblock according to prefix path mount option.
693 * Return dentry with refcount + 1 on success and NULL otherwise.
/*
 * Walks the prefix path component by component from sb->s_root using
 * lookup_positive_unlocked(), returning the dentry for the mount's
 * effective root.
 * NOTE(review): the path-walk loop (variable s/p declarations, dput of
 * intermediate dentries, kfree of full_path) is only partially visible
 * in this extract.
 */
695 static struct dentry *
696 cifs_get_root(struct smb_vol *vol, struct super_block *sb)
698 struct dentry *dentry;
699 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
700 char *full_path = NULL;
/* no prefix path => the superblock root is the mount root */
704 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)
705 return dget(sb->s_root);
707 full_path = cifs_build_path_to_root(vol, cifs_sb,
708 cifs_sb_master_tcon(cifs_sb), 0);
709 if (full_path == NULL)
710 return ERR_PTR(-ENOMEM);
712 cifs_dbg(FYI, "Get root dentry for %s\n", full_path);
714 sep = CIFS_DIR_SEP(cifs_sb);
715 dentry = dget(sb->s_root);
719 struct inode *dir = d_inode(dentry);
720 struct dentry *child;
/* each intermediate component must be a directory */
722 if (!S_ISDIR(dir->i_mode)) {
724 dentry = ERR_PTR(-ENOTDIR);
728 /* skip separators */
735 while (*s && *s != sep)
738 child = lookup_positive_unlocked(p, dentry, s - p);
741 } while (!IS_ERR(dentry));
/* sget() set-callback: attach the prepared cifs_sb to the new
 * superblock and give it an anonymous device number. */
746 static int cifs_set_super(struct super_block *sb, void *data)
748 struct cifs_mnt_data *mnt_data = data;
749 sb->s_fs_info = mnt_data->cifs_sb;
750 return set_anon_super(sb, NULL);
/*
 * Common mount path for both the "cifs" and "smb3" filesystem types:
 * parse the volume info, allocate and set up cifs_sb, perform the
 * network mount, then find or create the superblock via sget() and
 * resolve the root dentry.
 * NOTE(review): the error-path labels and several goto/if lines
 * (including the branch between new vs. existing superblock) are not
 * visible in this extract.
 */
753 static struct dentry *
754 cifs_smb3_do_mount(struct file_system_type *fs_type,
755 int flags, const char *dev_name, void *data, bool is_smb3)
758 struct super_block *sb;
759 struct cifs_sb_info *cifs_sb;
760 struct smb_vol *volume_info;
761 struct cifs_mnt_data mnt_data;
765 * Prints in Kernel / CIFS log the attempted mount operation
766 * If CIFS_DEBUG && cifs_FYI
769 cifs_dbg(FYI, "Devname: %s flags: %d\n", dev_name, flags);
771 cifs_info("Attempting to mount %s\n", dev_name);
773 volume_info = cifs_get_volume_info((char *)data, dev_name, is_smb3);
774 if (IS_ERR(volume_info))
775 return ERR_CAST(volume_info);
777 cifs_sb = kzalloc(sizeof(struct cifs_sb_info), GFP_KERNEL);
778 if (cifs_sb == NULL) {
779 root = ERR_PTR(-ENOMEM);
/* keep a copy of the raw options for later remount/show use */
783 cifs_sb->mountdata = kstrndup(data, PAGE_SIZE, GFP_KERNEL);
784 if (cifs_sb->mountdata == NULL) {
785 root = ERR_PTR(-ENOMEM);
789 rc = cifs_setup_cifs_sb(volume_info, cifs_sb);
795 rc = cifs_mount(cifs_sb, volume_info);
797 if (!(flags & SB_SILENT))
798 cifs_dbg(VFS, "cifs_mount failed w/return code = %d\n",
804 mnt_data.vol = volume_info;
805 mnt_data.cifs_sb = cifs_sb;
806 mnt_data.flags = flags;
808 /* BB should we make this contingent on mount parm? */
809 flags |= SB_NODIRATIME | SB_NOATIME;
811 sb = sget(fs_type, cifs_match_super, cifs_set_super, flags, &mnt_data);
814 cifs_umount(cifs_sb);
/* reused an existing sb: drop our redundant network mount */
819 cifs_dbg(FYI, "Use existing superblock\n");
820 cifs_umount(cifs_sb);
822 rc = cifs_read_super(sb);
828 sb->s_flags |= SB_ACTIVE;
831 root = cifs_get_root(volume_info, sb);
835 cifs_dbg(FYI, "dentry root is: %p\n", root);
839 deactivate_locked_super(sb);
841 cifs_cleanup_volume_info(volume_info);
845 kfree(cifs_sb->prepath);
846 kfree(cifs_sb->mountdata);
849 unload_nls(volume_info->local_nls);
/* "smb3" filesystem-type mount entry point (is_smb3 = true forbids
 * the weaker legacy dialects). */
853 static struct dentry *
854 smb3_do_mount(struct file_system_type *fs_type,
855 int flags, const char *dev_name, void *data)
857 return cifs_smb3_do_mount(fs_type, flags, dev_name, data, true);
/* "cifs" filesystem-type mount entry point (is_smb3 = false allows
 * all dialects). */
860 static struct dentry *
861 cifs_do_mount(struct file_system_type *fs_type,
862 int flags, const char *dev_name, void *data)
864 return cifs_smb3_do_mount(fs_type, flags, dev_name, data, false);
/*
 * read_iter for cache=loose files: O_DIRECT bypasses the page cache;
 * otherwise revalidate the mapping before the generic cached read.
 * NOTE(review): the rc error check between revalidate and the generic
 * read is not visible in this extract.
 */
868 cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter)
871 struct inode *inode = file_inode(iocb->ki_filp);
873 if (iocb->ki_filp->f_flags & O_DIRECT)
874 return cifs_user_readv(iocb, iter);
876 rc = cifs_revalidate_mapping(inode);
880 return generic_file_read_iter(iocb, iter);
/*
 * write_iter for cache=loose files. O_DIRECT writes go straight to the
 * server (zapping any read-cached pages afterwards); cached writes take
 * the writers count and flush dirty pages when the inode holds no
 * write oplock.
 * NOTE(review): several lines (return paths, the negative-writer check,
 * braces) are not visible in this extract.
 */
883 static ssize_t cifs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
885 struct inode *inode = file_inode(iocb->ki_filp);
886 struct cifsInodeInfo *cinode = CIFS_I(inode);
890 if (iocb->ki_filp->f_flags & O_DIRECT) {
891 written = cifs_user_writev(iocb, from);
/* direct write invalidates any locally cached read data */
892 if (written > 0 && CIFS_CACHE_READ(cinode)) {
893 cifs_zap_mapping(inode);
895 "Set no oplock for inode=%p after a write operation\n",
/* block oplock break handling while we are writing */
902 written = cifs_get_writer(cinode);
906 written = generic_file_write_iter(iocb, from);
908 if (CIFS_CACHE_WRITE(CIFS_I(inode)))
911 rc = filemap_fdatawrite(inode->i_mapping);
913 cifs_dbg(FYI, "cifs_file_write_iter: %d rc on %p inode\n",
917 cifs_put_writer(cinode);
/*
 * llseek handler. For whence values that depend on the real file size
 * (SEEK_END/SEEK_DATA/SEEK_HOLE) flush dirty pages and revalidate
 * attributes first; dialects with a native llseek (e.g. for SEEK_DATA/
 * SEEK_HOLE) get the call forwarded.
 */
921 static loff_t cifs_llseek(struct file *file, loff_t offset, int whence)
923 struct cifsFileInfo *cfile = file->private_data;
924 struct cifs_tcon *tcon;
927 * whence == SEEK_END || SEEK_DATA || SEEK_HOLE => we must revalidate
928 * the cached file length
930 if (whence != SEEK_SET && whence != SEEK_CUR) {
932 struct inode *inode = file_inode(file);
935 * We need to be sure that all dirty pages are written and the
936 * server has the newest file length.
938 if (!CIFS_CACHE_READ(CIFS_I(inode)) && inode->i_mapping &&
939 inode->i_mapping->nrpages != 0) {
940 rc = filemap_fdatawait(inode->i_mapping);
942 mapping_set_error(inode->i_mapping, rc);
947 * Some applications poll for the file length in this strange
948 * way so we must seek to end on non-oplocked files by
949 * setting the revalidate time to zero.
951 CIFS_I(inode)->time = 0;
953 rc = cifs_revalidate_file_attr(file);
957 if (cfile && cfile->tlink) {
958 tcon = tlink_tcon(cfile->tlink);
959 if (tcon->ses->server->ops->llseek)
960 return tcon->ses->server->ops->llseek(file, tcon,
963 return generic_file_llseek(file, offset, whence);
/*
 * ->setlease() handler: only grant a local lease when the client holds
 * a matching oplock/lease from the server (or the local_lease mount
 * option opts into trusting local state), since the kernel cannot
 * otherwise guarantee exclusivity against other clients.
 */
967 cifs_setlease(struct file *file, long arg, struct file_lock **lease, void **priv)
970 * Note that this is called by vfs setlease with i_lock held to
971 * protect *lease from going away.
973 struct inode *inode = file_inode(file);
974 struct cifsFileInfo *cfile = file->private_data;
/* leases only make sense on regular files */
976 if (!(S_ISREG(inode->i_mode)))
979 /* Check if file is oplocked if this is request for new lease */
980 if (arg == F_UNLCK ||
981 ((arg == F_RDLCK) && CIFS_CACHE_READ(CIFS_I(inode))) ||
982 ((arg == F_WRLCK) && CIFS_CACHE_WRITE(CIFS_I(inode))))
983 return generic_setlease(file, arg, lease, priv);
984 else if (tlink_tcon(cfile->tlink)->local_lease &&
985 !CIFS_CACHE_READ(CIFS_I(inode)))
987 * If the server claims to support oplock on this file, then we
988 * still need to check oplock even if the local_lease mount
989 * option is set, but there are servers which do not support
990 * oplock for which this mount option may be useful if the user
991 * knows that the file won't be changed on the server by anyone
994 return generic_setlease(file, arg, lease, priv);
/* Registration record for the "cifs" filesystem type.
 * NOTE(review): the .name and .fs_flags initializers are not visible
 * in this extract. */
999 struct file_system_type cifs_fs_type = {
1000 .owner = THIS_MODULE,
1002 .mount = cifs_do_mount,
1003 .kill_sb = cifs_kill_sb,
1006 MODULE_ALIAS_FS("cifs");
/* Registration record for the "smb3" filesystem type (same kill_sb,
 * stricter mount entry point that rejects legacy dialects). */
1008 static struct file_system_type smb3_fs_type = {
1009 .owner = THIS_MODULE,
1011 .mount = smb3_do_mount,
1012 .kill_sb = cifs_kill_sb,
1015 MODULE_ALIAS_FS("smb3");
1016 MODULE_ALIAS("smb3");
/* Inode operations for directories. */
1018 const struct inode_operations cifs_dir_inode_ops = {
1019 .create = cifs_create,
1020 .atomic_open = cifs_atomic_open,
1021 .lookup = cifs_lookup,
1022 .getattr = cifs_getattr,
1023 .unlink = cifs_unlink,
1024 .link = cifs_hardlink,
1025 .mkdir = cifs_mkdir,
1026 .rmdir = cifs_rmdir,
1027 .rename = cifs_rename2,
1028 .permission = cifs_permission,
1029 .setattr = cifs_setattr,
1030 .symlink = cifs_symlink,
1031 .mknod = cifs_mknod,
1032 .listxattr = cifs_listxattr,
/* Inode operations for regular files. */
1035 const struct inode_operations cifs_file_inode_ops = {
1036 .setattr = cifs_setattr,
1037 .getattr = cifs_getattr,
1038 .permission = cifs_permission,
1039 .listxattr = cifs_listxattr,
1040 .fiemap = cifs_fiemap,
/* Inode operations for symbolic links. */
1043 const struct inode_operations cifs_symlink_inode_ops = {
1044 .get_link = cifs_get_link,
1045 .permission = cifs_permission,
1046 .listxattr = cifs_listxattr,
/*
 * ->remap_file_range() (reflink/clone): duplicate extents server-side
 * via the dialect's duplicate_extents op, after truncating the cached
 * destination pages so stale data cannot be read back.
 * NOTE(review): rc/xid declarations, some goto/unlock lines and the
 * len==0 "whole file" condition are not visible in this extract.
 */
1049 static loff_t cifs_remap_file_range(struct file *src_file, loff_t off,
1050 struct file *dst_file, loff_t destoff, loff_t len,
1051 unsigned int remap_flags)
1053 struct inode *src_inode = file_inode(src_file);
1054 struct inode *target_inode = file_inode(dst_file);
1055 struct cifsFileInfo *smb_file_src = src_file->private_data;
1056 struct cifsFileInfo *smb_file_target;
1057 struct cifs_tcon *target_tcon;
1061 if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
1064 cifs_dbg(FYI, "clone range\n");
1068 if (!src_file->private_data || !dst_file->private_data) {
1070 cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
1074 smb_file_target = dst_file->private_data;
1075 target_tcon = tlink_tcon(smb_file_target->tlink);
1078 * Note: cifs case is easier than btrfs since server responsible for
1079 * checks for proper open modes and file type and if it wants
1080 * server could even support copy of range where source = target
1082 lock_two_nondirectories(target_inode, src_inode);
/* len == 0 means "to end of source file" */
1085 len = src_inode->i_size - off;
1087 cifs_dbg(FYI, "about to flush pages\n");
1088 /* should we flush first and last page first */
1089 truncate_inode_pages_range(&target_inode->i_data, destoff,
1090 PAGE_ALIGN(destoff + len)-1);
1092 if (target_tcon->ses->server->ops->duplicate_extents)
1093 rc = target_tcon->ses->server->ops->duplicate_extents(xid,
1094 smb_file_src, smb_file_target, off, len, destoff);
1098 /* force revalidate of size and timestamps of target file now
1099 that target is updated on the server */
1100 CIFS_I(target_inode)->time = 0;
1101 /* although unlocking in the reverse order from locking is not
1102 strictly necessary here it is a little cleaner to be consistent */
1103 unlock_two_nondirectories(src_inode, target_inode);
1106 return rc < 0 ? rc : len;
/*
 * Server-side copy via the SMB copychunk mechanism. Requires both files
 * to be on the same session; invalidates the destination's page cache
 * before the copy and its cached attributes afterwards.
 * NOTE(review): the rc declaration, some error-path goto lines and the
 * final return are not visible in this extract.
 */
1109 ssize_t cifs_file_copychunk_range(unsigned int xid,
1110 struct file *src_file, loff_t off,
1111 struct file *dst_file, loff_t destoff,
1112 size_t len, unsigned int flags)
1114 struct inode *src_inode = file_inode(src_file);
1115 struct inode *target_inode = file_inode(dst_file);
1116 struct cifsFileInfo *smb_file_src;
1117 struct cifsFileInfo *smb_file_target;
1118 struct cifs_tcon *src_tcon;
1119 struct cifs_tcon *target_tcon;
1122 cifs_dbg(FYI, "copychunk range\n");
1124 if (!src_file->private_data || !dst_file->private_data) {
1126 cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
1131 smb_file_target = dst_file->private_data;
1132 smb_file_src = src_file->private_data;
1133 src_tcon = tlink_tcon(smb_file_src->tlink);
1134 target_tcon = tlink_tcon(smb_file_target->tlink);
/* copychunk works within one SMB session only */
1136 if (src_tcon->ses != target_tcon->ses) {
1137 cifs_dbg(VFS, "source and target of copy not on same server\n");
1142 if (!target_tcon->ses->server->ops->copychunk_range)
1146 * Note: cifs case is easier than btrfs since server responsible for
1147 * checks for proper open modes and file type and if it wants
1148 * server could even support copy of range where source = target
1150 lock_two_nondirectories(target_inode, src_inode);
1152 cifs_dbg(FYI, "about to flush pages\n");
1153 /* should we flush first and last page first */
1154 truncate_inode_pages(&target_inode->i_data, 0);
/* update mtime/ctime and strip setuid bits on the destination */
1156 rc = file_modified(dst_file);
1158 rc = target_tcon->ses->server->ops->copychunk_range(xid,
1159 smb_file_src, smb_file_target, off, len, destoff);
1161 file_accessed(src_file);
1163 /* force revalidate of size and timestamps of target file now
1164 * that target is updated on the server
1166 CIFS_I(target_inode)->time = 0;
1167 /* although unlocking in the reverse order from locking is not
1168 * strictly necessary here it is a little cleaner to be consistent
1170 unlock_two_nondirectories(src_inode, target_inode);
1177 * Directory operations under CIFS/SMB2/SMB3 are synchronous, so fsync()
1178 * is a dummy operation.
1180 static int cifs_dir_fsync(struct file *file, loff_t start, loff_t end, int datasync)
1182 cifs_dbg(FYI, "Sync directory - name: %pD datasync: 0x%x\n",
/*
 * ->copy_file_range(): try the server-side copychunk first, then fall
 * back to the VFS generic (read+write) copy when the server cannot do
 * it (-EOPNOTSUPP) or the files cross superblocks (-EXDEV).
 * NOTE(review): the free_xid() call and final return are not visible
 * in this extract.
 */
1188 static ssize_t cifs_copy_file_range(struct file *src_file, loff_t off,
1189 struct file *dst_file, loff_t destoff,
1190 size_t len, unsigned int flags)
1192 unsigned int xid = get_xid();
1195 rc = cifs_file_copychunk_range(xid, src_file, off, dst_file, destoff,
1199 if (rc == -EOPNOTSUPP || rc == -EXDEV)
1200 rc = generic_copy_file_range(src_file, off, dst_file,
1201 destoff, len, flags);
/* File operations for cache=loose files with byte-range locking. */
1205 const struct file_operations cifs_file_ops = {
1206 .read_iter = cifs_loose_read_iter,
1207 .write_iter = cifs_file_write_iter,
1209 .release = cifs_close,
1211 .fsync = cifs_fsync,
1212 .flush = cifs_flush,
1213 .mmap = cifs_file_mmap,
1214 .splice_read = generic_file_splice_read,
1215 .splice_write = iter_file_splice_write,
1216 .llseek = cifs_llseek,
1217 .unlocked_ioctl = cifs_ioctl,
1218 .copy_file_range = cifs_copy_file_range,
1219 .remap_file_range = cifs_remap_file_range,
1220 .setlease = cifs_setlease,
1221 .fallocate = cifs_fallocate,
/* File operations for cache=strict files (strict read/write/fsync/mmap). */
1224 const struct file_operations cifs_file_strict_ops = {
1225 .read_iter = cifs_strict_readv,
1226 .write_iter = cifs_strict_writev,
1228 .release = cifs_close,
1230 .fsync = cifs_strict_fsync,
1231 .flush = cifs_flush,
1232 .mmap = cifs_file_strict_mmap,
1233 .splice_read = generic_file_splice_read,
1234 .splice_write = iter_file_splice_write,
1235 .llseek = cifs_llseek,
1236 .unlocked_ioctl = cifs_ioctl,
1237 .copy_file_range = cifs_copy_file_range,
1238 .remap_file_range = cifs_remap_file_range,
1239 .setlease = cifs_setlease,
1240 .fallocate = cifs_fallocate,
/*
 * Direct (uncached) I/O variant: reads/writes bypass the page cache via
 * cifs_direct_readv/cifs_direct_writev.
 */
1243 const struct file_operations cifs_file_direct_ops = {
1244 .read_iter = cifs_direct_readv,
1245 .write_iter = cifs_direct_writev,
1247 .release = cifs_close,
1249 .fsync = cifs_fsync,
1250 .flush = cifs_flush,
1251 .mmap = cifs_file_mmap,
1252 .splice_read = generic_file_splice_read,
1253 .splice_write = iter_file_splice_write,
1254 .unlocked_ioctl = cifs_ioctl,
1255 .copy_file_range = cifs_copy_file_range,
1256 .remap_file_range = cifs_remap_file_range,
1257 .llseek = cifs_llseek,
1258 .setlease = cifs_setlease,
1259 .fallocate = cifs_fallocate,
/*
 * "nobrl" variant of cifs_file_ops — presumably for mounts without
 * byte-range locks (no .lock entry); the .lock line is not visible in
 * this excerpt, so confirm against the full file.
 */
1262 const struct file_operations cifs_file_nobrl_ops = {
1263 .read_iter = cifs_loose_read_iter,
1264 .write_iter = cifs_file_write_iter,
1266 .release = cifs_close,
1267 .fsync = cifs_fsync,
1268 .flush = cifs_flush,
1269 .mmap = cifs_file_mmap,
1270 .splice_read = generic_file_splice_read,
1271 .splice_write = iter_file_splice_write,
1272 .llseek = cifs_llseek,
1273 .unlocked_ioctl = cifs_ioctl,
1274 .copy_file_range = cifs_copy_file_range,
1275 .remap_file_range = cifs_remap_file_range,
1276 .setlease = cifs_setlease,
1277 .fallocate = cifs_fallocate,
/*
 * Strict cache-coherency variant for mounts without byte-range locks
 * (combines cifs_file_strict_ops' read/write/fsync/mmap handlers with the
 * "nobrl" table shape).
 */
1280 const struct file_operations cifs_file_strict_nobrl_ops = {
1281 .read_iter = cifs_strict_readv,
1282 .write_iter = cifs_strict_writev,
1284 .release = cifs_close,
1285 .fsync = cifs_strict_fsync,
1286 .flush = cifs_flush,
1287 .mmap = cifs_file_strict_mmap,
1288 .splice_read = generic_file_splice_read,
1289 .splice_write = iter_file_splice_write,
1290 .llseek = cifs_llseek,
1291 .unlocked_ioctl = cifs_ioctl,
1292 .copy_file_range = cifs_copy_file_range,
1293 .remap_file_range = cifs_remap_file_range,
1294 .setlease = cifs_setlease,
1295 .fallocate = cifs_fallocate,
/*
 * Direct (uncached) I/O variant for mounts without byte-range locks.
 */
1298 const struct file_operations cifs_file_direct_nobrl_ops = {
1299 .read_iter = cifs_direct_readv,
1300 .write_iter = cifs_direct_writev,
1302 .release = cifs_close,
1303 .fsync = cifs_fsync,
1304 .flush = cifs_flush,
1305 .mmap = cifs_file_mmap,
1306 .splice_read = generic_file_splice_read,
1307 .splice_write = iter_file_splice_write,
1308 .unlocked_ioctl = cifs_ioctl,
1309 .copy_file_range = cifs_copy_file_range,
1310 .remap_file_range = cifs_remap_file_range,
1311 .llseek = cifs_llseek,
1312 .setlease = cifs_setlease,
1313 .fallocate = cifs_fallocate,
/*
 * Directory operations: shared (lockless) readdir iteration, plus the same
 * ioctl/copy/remap handlers as regular files.  fsync is the no-op
 * cifs_dir_fsync since directory metadata is committed synchronously.
 */
1316 const struct file_operations cifs_dir_ops = {
1317 .iterate_shared = cifs_readdir,
1318 .release = cifs_closedir,
1319 .read = generic_read_dir,
1320 .unlocked_ioctl = cifs_ioctl,
1321 .copy_file_range = cifs_copy_file_range,
1322 .remap_file_range = cifs_remap_file_range,
1323 .llseek = generic_file_llseek,
1324 .fsync = cifs_dir_fsync,
/*
 * Slab constructor for cifsInodeInfo objects: runs once per slab object,
 * initializing the embedded VFS inode and the byte-range lock semaphore.
 */
1328 cifs_init_once(void *inode)
1330 struct cifsInodeInfo *cifsi = inode;
1332 inode_init_once(&cifsi->vfs_inode);
1333 init_rwsem(&cifsi->lock_sem);
/*
 * Create the kmem cache used to allocate cifsInodeInfo structures.
 * SLAB_ACCOUNT charges allocations to the caller's memcg; cifs_init_once
 * is the per-object constructor (passed on a line not shown here —
 * TODO confirm against the full file).
 */
1337 cifs_init_inodecache(void)
1339 cifs_inode_cachep = kmem_cache_create("cifs_inode_cache",
1340 sizeof(struct cifsInodeInfo),
1341 0, (SLAB_RECLAIM_ACCOUNT|
1342 SLAB_MEM_SPREAD|SLAB_ACCOUNT),
1344 if (cifs_inode_cachep == NULL)
/*
 * Tear down the inode cache.  An RCU barrier (on a line not shown here)
 * must precede the destroy so delayed RCU-freed inodes are flushed first.
 */
1351 cifs_destroy_inodecache(void)
1354 * Make sure all delayed rcu free inodes are flushed before we
1358 kmem_cache_destroy(cifs_inode_cachep);
/*
 * Allocate the request-buffer slab caches and mempools:
 *  - cifs_req_cachep/cifs_req_poolp: large buffers (CIFSMaxBufSize plus
 *    SMB2 header room) for path-based and large requests;
 *  - cifs_sm_req_cachep/cifs_sm_req_poolp: small buffers for the common
 *    handle-based requests.
 * CIFSMaxBufSize, cifs_min_rcv and cifs_min_small are module parameters
 * clamped here to sane ranges.  Failure paths unwind earlier allocations.
 */
1362 cifs_init_request_bufs(void)
1365 * SMB2 maximum header size is bigger than CIFS one - no problems to
1366 * allocate some more bytes for CIFS.
1368 size_t max_hdr_size = MAX_SMB2_HDR_SIZE;
/* clamp the tunable buffer size to [8192, 127K] */
1370 if (CIFSMaxBufSize < 8192) {
1371 /* Buffer size can not be smaller than 2 * PATH_MAX since maximum
1372 Unicode path name has to fit in any SMB/CIFS path based frames */
1373 CIFSMaxBufSize = 8192;
1374 } else if (CIFSMaxBufSize > 1024*127) {
1375 CIFSMaxBufSize = 1024 * 127;
1377 CIFSMaxBufSize &= 0x1FE00; /* Round size to even 512 byte mult*/
1380 cifs_dbg(VFS, "CIFSMaxBufSize %d 0x%x\n",
1381 CIFSMaxBufSize, CIFSMaxBufSize);
/* usercopy whitelist covers the whole buffer: SMB payloads are copied
   to/from user space */
1383 cifs_req_cachep = kmem_cache_create_usercopy("cifs_request",
1384 CIFSMaxBufSize + max_hdr_size, 0,
1385 SLAB_HWCACHE_ALIGN, 0,
1386 CIFSMaxBufSize + max_hdr_size,
1388 if (cifs_req_cachep == NULL)
/* clamp minimum reserved receive buffers to [1, 64] */
1391 if (cifs_min_rcv < 1)
1393 else if (cifs_min_rcv > 64) {
1395 cifs_dbg(VFS, "cifs_min_rcv set to maximum (64)\n");
1398 cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv,
1401 if (cifs_req_poolp == NULL) {
1402 kmem_cache_destroy(cifs_req_cachep);
1405 /* MAX_CIFS_SMALL_BUFFER_SIZE bytes is enough for most SMB responses and
1406 almost all handle based requests (but not write response, nor is it
1407 sufficient for path based requests). A smaller size would have
1408 been more efficient (compacting multiple slab items on one 4k page)
1409 for the case in which debug was on, but this larger size allows
1410 more SMBs to use small buffer alloc and is still much more
1411 efficient to alloc 1 per page off the slab compared to 17K (5page)
1412 alloc of large cifs buffers even when page debugging is on */
1413 cifs_sm_req_cachep = kmem_cache_create_usercopy("cifs_small_rq",
1414 MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
1415 0, MAX_CIFS_SMALL_BUFFER_SIZE, NULL);
1416 if (cifs_sm_req_cachep == NULL) {
1417 mempool_destroy(cifs_req_poolp);
1418 kmem_cache_destroy(cifs_req_cachep);
/* clamp minimum reserved small buffers to [2, 256] */
1422 if (cifs_min_small < 2)
1424 else if (cifs_min_small > 256) {
1425 cifs_min_small = 256;
1426 cifs_dbg(FYI, "cifs_min_small set to maximum (256)\n");
1429 cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small,
1430 cifs_sm_req_cachep);
/* unwind everything allocated so far on the last failure */
1432 if (cifs_sm_req_poolp == NULL) {
1433 mempool_destroy(cifs_req_poolp);
1434 kmem_cache_destroy(cifs_req_cachep);
1435 kmem_cache_destroy(cifs_sm_req_cachep);
/*
 * Free the request-buffer mempools and their backing slab caches, in the
 * reverse order of creation (pool before cache for each pair).
 */
1443 cifs_destroy_request_bufs(void)
1445 mempool_destroy(cifs_req_poolp);
1446 kmem_cache_destroy(cifs_req_cachep);
1447 mempool_destroy(cifs_sm_req_poolp);
1448 kmem_cache_destroy(cifs_sm_req_cachep);
/*
 * Create the slab cache and mempool for mid_q_entry structures (one per
 * in-flight request, keyed by multiplex id).  The pool guarantees at
 * least 3 entries can always be allocated.
 */
1452 cifs_init_mids(void)
1454 cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids",
1455 sizeof(struct mid_q_entry), 0,
1456 SLAB_HWCACHE_ALIGN, NULL);
1457 if (cifs_mid_cachep == NULL)
1460 /* 3 is a reasonable minimum number of simultaneous operations */
1461 cifs_mid_poolp = mempool_create_slab_pool(3, cifs_mid_cachep);
1462 if (cifs_mid_poolp == NULL) {
1463 kmem_cache_destroy(cifs_mid_cachep);
/* Free the mid_q_entry mempool, then its backing slab cache. */
1471 cifs_destroy_mids(void)
1473 mempool_destroy(cifs_mid_poolp);
1474 kmem_cache_destroy(cifs_mid_cachep);
/*
 * Module init body (signature not visible in this excerpt): initialize
 * global lists, counters and locks, clamp module parameters, create the
 * workqueues and caches, register upcall key types, and finally register
 * the cifs and smb3 filesystem types.  Errors unwind via the goto
 * ladder at the bottom, in reverse order of setup.
 */
1482 INIT_LIST_HEAD(&cifs_tcp_ses_list);
1483 #ifdef CONFIG_CIFS_DNOTIFY_EXPERIMENTAL /* unused temporarily */
1484 INIT_LIST_HEAD(&GlobalDnotifyReqList);
1485 INIT_LIST_HEAD(&GlobalDnotifyRsp_Q);
1486 #endif /* was needed for dnotify, and will be needed for inotify when VFS fix */
1488 * Initialize Global counters
1490 atomic_set(&sesInfoAllocCount, 0);
1491 atomic_set(&tconInfoAllocCount, 0);
1492 atomic_set(&tcpSesAllocCount, 0);
1493 atomic_set(&tcpSesReconnectCount, 0);
1494 atomic_set(&tconInfoReconnectCount, 0);
1496 atomic_set(&bufAllocCount, 0);
1497 atomic_set(&smBufAllocCount, 0);
1498 #ifdef CONFIG_CIFS_STATS2
1499 atomic_set(&totBufAllocCount, 0);
1500 atomic_set(&totSmBufAllocCount, 0);
/* warn about out-of-range slow-response threshold but do not clamp it */
1501 if (slow_rsp_threshold < 1)
1502 cifs_dbg(FYI, "slow_response_threshold msgs disabled\n");
1503 else if (slow_rsp_threshold > 32767)
1505 "slow response threshold set higher than recommended (0 to 32767)\n");
1506 #endif /* CONFIG_CIFS_STATS2 */
1508 atomic_set(&midCount, 0);
1509 GlobalCurrentXid = 0;
1510 GlobalTotalActiveXid = 0;
1511 GlobalMaxActiveXid = 0;
1512 spin_lock_init(&cifs_tcp_ses_lock);
1513 spin_lock_init(&GlobalMid_Lock);
/* random secret used to obfuscate lock owner hashes */
1515 cifs_lock_secret = get_random_u32();
/* clamp cifs_max_pending to [2, CIFS_MAX_REQ] */
1517 if (cifs_max_pending < 2) {
1518 cifs_max_pending = 2;
1519 cifs_dbg(FYI, "cifs_max_pending set to min of 2\n");
1520 } else if (cifs_max_pending > CIFS_MAX_REQ) {
1521 cifs_max_pending = CIFS_MAX_REQ;
1522 cifs_dbg(FYI, "cifs_max_pending set to max of %u\n",
/* freezable+reclaim-safe general-purpose workqueue */
1526 cifsiod_wq = alloc_workqueue("cifsiod", WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1529 goto out_clean_proc;
1533 * Consider in future setting limit!=0 maybe to min(num_of_cores - 1, 3)
1534 * so that we don't launch too many worker threads but
1535 * Documentation/workqueue.txt recommends setting it to 0
1538 /* WQ_UNBOUND allows decrypt tasks to run on any CPU */
1539 decrypt_wq = alloc_workqueue("smb3decryptd",
1540 WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1543 goto out_destroy_cifsiod_wq;
1546 cifsoplockd_wq = alloc_workqueue("cifsoplockd",
1547 WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1548 if (!cifsoplockd_wq) {
1550 goto out_destroy_decrypt_wq;
1553 rc = cifs_fscache_register();
1555 goto out_destroy_cifsoplockd_wq;
1557 rc = cifs_init_inodecache();
1559 goto out_unreg_fscache;
1561 rc = cifs_init_mids();
1563 goto out_destroy_inodecache;
1565 rc = cifs_init_request_bufs();
1567 goto out_destroy_mids;
1569 #ifdef CONFIG_CIFS_DFS_UPCALL
1570 rc = dfs_cache_init();
1572 goto out_destroy_request_bufs;
1573 #endif /* CONFIG_CIFS_DFS_UPCALL */
1574 #ifdef CONFIG_CIFS_UPCALL
1575 rc = init_cifs_spnego();
1577 goto out_destroy_dfs_cache;
1578 #endif /* CONFIG_CIFS_UPCALL */
1580 rc = init_cifs_idmap();
1582 goto out_register_key_type;
/* register both the legacy "cifs" and the "smb3" filesystem types;
   if smb3 registration fails, roll back the cifs registration too */
1584 rc = register_filesystem(&cifs_fs_type);
1586 goto out_init_cifs_idmap;
1588 rc = register_filesystem(&smb3_fs_type);
1590 unregister_filesystem(&cifs_fs_type);
1591 goto out_init_cifs_idmap;
/* error unwind: reverse order of the setup above */
1596 out_init_cifs_idmap:
1598 out_register_key_type:
1599 #ifdef CONFIG_CIFS_UPCALL
1601 out_destroy_dfs_cache:
1603 #ifdef CONFIG_CIFS_DFS_UPCALL
1604 dfs_cache_destroy();
1605 out_destroy_request_bufs:
1607 cifs_destroy_request_bufs();
1609 cifs_destroy_mids();
1610 out_destroy_inodecache:
1611 cifs_destroy_inodecache();
1613 cifs_fscache_unregister();
1614 out_destroy_cifsoplockd_wq:
1615 destroy_workqueue(cifsoplockd_wq);
1616 out_destroy_decrypt_wq:
1617 destroy_workqueue(decrypt_wq);
1618 out_destroy_cifsiod_wq:
1619 destroy_workqueue(cifsiod_wq);
/*
 * Module exit body (signature not visible in this excerpt): unregister
 * both filesystem types, then tear everything down in reverse order of
 * init_cifs — caches, fscache registration, and finally the workqueues.
 */
1628 cifs_dbg(NOISY, "exit_smb3\n");
1629 unregister_filesystem(&cifs_fs_type);
1630 unregister_filesystem(&smb3_fs_type);
1631 cifs_dfs_release_automount_timer();
1633 #ifdef CONFIG_CIFS_UPCALL
1636 #ifdef CONFIG_CIFS_DFS_UPCALL
1637 dfs_cache_destroy();
1639 cifs_destroy_request_bufs();
1640 cifs_destroy_mids();
1641 cifs_destroy_inodecache();
1642 cifs_fscache_unregister();
1643 destroy_workqueue(cifsoplockd_wq);
1644 destroy_workqueue(decrypt_wq);
1645 destroy_workqueue(cifsiod_wq);
/*
 * Module metadata.  The SOFTDEP("pre: ...") entries ask the module loader
 * to load the listed crypto/nls modules before cifs, so the hash and
 * cipher algorithms the client negotiates are already available.
 */
1649 MODULE_AUTHOR("Steve French");
1650 MODULE_LICENSE("GPL"); /* combination of LGPL + GPL source behaves as GPL */
1652 ("VFS to access SMB3 servers e.g. Samba, Macs, Azure and Windows (and "
1653 "also older servers complying with the SNIA CIFS Specification)");
1654 MODULE_VERSION(CIFS_VERSION);
1655 MODULE_SOFTDEP("pre: ecb");
1656 MODULE_SOFTDEP("pre: hmac");
1657 MODULE_SOFTDEP("pre: md4");
1658 MODULE_SOFTDEP("pre: md5");
1659 MODULE_SOFTDEP("pre: nls");
1660 MODULE_SOFTDEP("pre: aes");
1661 MODULE_SOFTDEP("pre: cmac");
1662 MODULE_SOFTDEP("pre: sha256");
1663 MODULE_SOFTDEP("pre: sha512");
1664 MODULE_SOFTDEP("pre: aead2");
1665 MODULE_SOFTDEP("pre: ccm");
1666 MODULE_SOFTDEP("pre: gcm");
module_init(init_cifs)
module_exit(exit_cifs)