4 * Copyright (C) International Business Machines Corp., 2002,2008
5 * Author(s): Steve French (sfrench@us.ibm.com)
7 * Common Internet FileSystem (CIFS) client
9 * This library is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU Lesser General Public License as published
11 * by the Free Software Foundation; either version 2.1 of the License, or
12 * (at your option) any later version.
14 * This library is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
17 * the GNU Lesser General Public License for more details.
19 * You should have received a copy of the GNU Lesser General Public License
20 * along with this library; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
/* Note that BB means BUGBUG (i.e. something to fix eventually) */
26 #include <linux/module.h>
28 #include <linux/mount.h>
29 #include <linux/slab.h>
30 #include <linux/init.h>
31 #include <linux/list.h>
32 #include <linux/seq_file.h>
33 #include <linux/vfs.h>
34 #include <linux/mempool.h>
35 #include <linux/delay.h>
36 #include <linux/kthread.h>
37 #include <linux/freezer.h>
38 #include <linux/namei.h>
39 #include <linux/random.h>
40 #include <linux/uuid.h>
41 #include <linux/xattr.h>
45 #define DECLARE_GLOBALS_HERE
47 #include "cifsproto.h"
48 #include "cifs_debug.h"
49 #include "cifs_fs_sb.h"
51 #include <linux/key-type.h>
52 #include "cifs_spnego.h"
55 #ifdef CONFIG_CIFS_DFS_UPCALL
56 #include "dfs_cache.h"
/*
 * DOS dates from 1980/1/1 through 2107/12/31
 * Protocol specifications indicate the year field should only go up to 119
 * (i.e. a maximum year of 2099), but that narrower range has not been
 * checked.
 */
64 #define SMB_DATE_MAX (127<<9 | 12<<5 | 31)
65 #define SMB_DATE_MIN (0<<9 | 1<<5 | 1)
66 #define SMB_TIME_MAX (23<<11 | 59<<5 | 29)
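/*
 * Illustrative sketch (not used by the driver; the helper name is made up):
 * how the packed DOS date/time constants above decode. The date word packs
 * (year - 1980) in bits 15-9, month in bits 8-5 and day in bits 4-0; the
 * time word packs hours in bits 15-11, minutes in bits 10-5 and seconds/2
 * in bits 4-0, so SMB_DATE_MAX/SMB_TIME_MAX decode to 2107-12-31 23:59:58.
 */
#if 0
static void example_decode_dos_datetime(u16 date, u16 time)
{
	unsigned int year  = 1980 + (date >> 9);	/* bits 15-9 */
	unsigned int month = (date >> 5) & 0xf;		/* bits 8-5 */
	unsigned int day   = date & 0x1f;		/* bits 4-0 */
	unsigned int hour  = time >> 11;		/* bits 15-11 */
	unsigned int min   = (time >> 5) & 0x3f;	/* bits 10-5 */
	unsigned int sec   = (time & 0x1f) * 2;		/* stored as 2s units */

	pr_info("%04u-%02u-%02u %02u:%02u:%02u\n",
		year, month, day, hour, min, sec);
}
#endif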
70 bool enable_oplocks = true;
71 bool linuxExtEnabled = true;
72 bool lookupCacheEnabled = true;
73 bool disable_legacy_dialects; /* false by default */
74 unsigned int global_secflags = CIFSSEC_DEF;
75 /* unsigned int ntlmv2_support = 0; */
76 unsigned int sign_CIFS_PDUs = 1;
77 static const struct super_operations cifs_super_ops;
78 unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
79 module_param(CIFSMaxBufSize, uint, 0444);
80 MODULE_PARM_DESC(CIFSMaxBufSize, "Network buffer size (not including header) "
82 "Default: 16384 Range: 8192 to 130048");
83 unsigned int cifs_min_rcv = CIFS_MIN_RCV_POOL;
84 module_param(cifs_min_rcv, uint, 0444);
MODULE_PARM_DESC(cifs_min_rcv, "Network buffers in pool. Default: 4 Range: "
				"1 to 64");
87 unsigned int cifs_min_small = 30;
88 module_param(cifs_min_small, uint, 0444);
MODULE_PARM_DESC(cifs_min_small, "Small network buffers in pool. Default: 30 "
				 "Range: 2 to 256");
91 unsigned int cifs_max_pending = CIFS_MAX_REQ;
92 module_param(cifs_max_pending, uint, 0444);
93 MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server for "
94 "CIFS/SMB1 dialect (N/A for SMB3) "
95 "Default: 32767 Range: 2 to 32767.");
96 #ifdef CONFIG_CIFS_STATS2
97 unsigned int slow_rsp_threshold = 1;
98 module_param(slow_rsp_threshold, uint, 0644);
99 MODULE_PARM_DESC(slow_rsp_threshold, "Amount of time (in seconds) to wait "
100 "before logging that a response is delayed. "
101 "Default: 1 (if set to 0 disables msg).");
104 module_param(enable_oplocks, bool, 0644);
105 MODULE_PARM_DESC(enable_oplocks, "Enable or disable oplocks. Default: y/Y/1");
107 module_param(disable_legacy_dialects, bool, 0644);
108 MODULE_PARM_DESC(disable_legacy_dialects, "To improve security it may be "
109 "helpful to restrict the ability to "
110 "override the default dialects (SMB2.1, "
111 "SMB3 and SMB3.02) on mount with old "
112 "dialects (CIFS/SMB1 and SMB2) since "
113 "vers=1.0 (CIFS/SMB1) and vers=2.0 are weaker"
114 " and less secure. Default: n/N/0");
116 extern mempool_t *cifs_sm_req_poolp;
117 extern mempool_t *cifs_req_poolp;
118 extern mempool_t *cifs_mid_poolp;
120 struct workqueue_struct *cifsiod_wq;
121 struct workqueue_struct *decrypt_wq;
122 struct workqueue_struct *fileinfo_put_wq;
123 struct workqueue_struct *cifsoplockd_wq;
124 __u32 cifs_lock_secret;
/*
 * Bumps refcount for cifs super block.
 * Note that it should only be called if a reference to the VFS super block is
 * already held, e.g. in open-type syscalls context. Otherwise it can race with
 * atomic_dec_and_test in deactivate_locked_super.
 */
133 cifs_sb_active(struct super_block *sb)
135 struct cifs_sb_info *server = CIFS_SB(sb);
137 if (atomic_inc_return(&server->active) == 1)
138 atomic_inc(&sb->s_active);
142 cifs_sb_deactive(struct super_block *sb)
144 struct cifs_sb_info *server = CIFS_SB(sb);
146 if (atomic_dec_and_test(&server->active))
147 deactivate_super(sb);
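/*
 * Illustrative sketch (hypothetical caller, not part of the driver):
 * cifs_sb_active() and cifs_sb_deactive() are meant to be used in matched
 * pairs while a VFS reference to the superblock is already held, e.g. from
 * an open-file context, so that the superblock cannot be torn down under a
 * long-running operation:
 */
#if 0
static void example_sb_pin(struct file *file)
{
	struct super_block *sb = file_inode(file)->i_sb;

	cifs_sb_active(sb);	/* first caller also bumps sb->s_active */
	/* ... work that must not race with deactivate_locked_super() ... */
	cifs_sb_deactive(sb);	/* last caller drops the extra sb reference */
}
#endif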
151 cifs_read_super(struct super_block *sb)
154 struct cifs_sb_info *cifs_sb;
155 struct cifs_tcon *tcon;
156 struct timespec64 ts;
159 cifs_sb = CIFS_SB(sb);
160 tcon = cifs_sb_master_tcon(cifs_sb);
162 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIXACL)
163 sb->s_flags |= SB_POSIXACL;
165 if (tcon->snapshot_time)
166 sb->s_flags |= SB_RDONLY;
168 if (tcon->ses->capabilities & tcon->ses->server->vals->cap_large_files)
169 sb->s_maxbytes = MAX_LFS_FILESIZE;
171 sb->s_maxbytes = MAX_NON_LFS;
	/*
	 * Some very old servers like DOS and OS/2 used 2 second granularity
	 * (while all current servers use 100ns granularity - see MS-DTYP),
	 * but 1 second is the maximum granularity allowed by the VFS, so for
	 * old servers set the time granularity to 1 second and for everything
	 * else (current servers) set it to 100ns.
	 */
180 if ((tcon->ses->server->vals->protocol_id == SMB10_PROT_ID) &&
181 ((tcon->ses->capabilities &
182 tcon->ses->server->vals->cap_nt_find) == 0) &&
184 sb->s_time_gran = 1000000000; /* 1 second is max allowed gran */
185 ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MIN), 0, 0);
186 sb->s_time_min = ts.tv_sec;
187 ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MAX),
188 cpu_to_le16(SMB_TIME_MAX), 0);
189 sb->s_time_max = ts.tv_sec;
		/*
		 * Almost every server, including all SMB2+, uses DCE TIME,
		 * i.e. 100-nanosecond units since 1601. See MS-DTYP and
		 * MS-FSCC.
		 */
195 sb->s_time_gran = 100;
196 ts = cifs_NTtimeToUnix(0);
197 sb->s_time_min = ts.tv_sec;
198 ts = cifs_NTtimeToUnix(cpu_to_le64(S64_MAX));
199 sb->s_time_max = ts.tv_sec;
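	/*
	 * Worked example of the conversion above (arithmetic only, not a
	 * statement about any particular server): NT/DCE time counts 100ns
	 * ticks since 1601-01-01, which is 11644473600 seconds before the
	 * Unix epoch, so
	 *
	 *   unix_seconds = nt_ticks / 10000000 - 11644473600
	 *
	 * giving s_time_min = -11644473600 (the year 1601) for nt_ticks == 0
	 * and roughly the year 30828 for nt_ticks == S64_MAX.
	 */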
202 sb->s_magic = CIFS_MAGIC_NUMBER;
203 sb->s_op = &cifs_super_ops;
204 sb->s_xattr = cifs_xattr_handlers;
205 rc = super_setup_bdi(sb);
208 /* tune readahead according to rsize */
209 sb->s_bdi->ra_pages = cifs_sb->rsize / PAGE_SIZE;
211 sb->s_blocksize = CIFS_MAX_MSGSIZE;
212 sb->s_blocksize_bits = 14; /* default 2**14 = CIFS_MAX_MSGSIZE */
213 inode = cifs_root_iget(sb);
221 sb->s_d_op = &cifs_ci_dentry_ops;
223 sb->s_d_op = &cifs_dentry_ops;
225 sb->s_root = d_make_root(inode);
231 #ifdef CONFIG_CIFS_NFSD_EXPORT
232 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
233 cifs_dbg(FYI, "export ops supported\n");
234 sb->s_export_op = &cifs_export_ops;
236 #endif /* CONFIG_CIFS_NFSD_EXPORT */
241 cifs_dbg(VFS, "%s: get root inode failed\n", __func__);
245 static void cifs_kill_sb(struct super_block *sb)
247 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
249 cifs_umount(cifs_sb);
253 cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
255 struct super_block *sb = dentry->d_sb;
256 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
257 struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
258 struct TCP_Server_Info *server = tcon->ses->server;
264 if (le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength) > 0)
266 le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength);
268 buf->f_namelen = PATH_MAX;
270 buf->f_fsid.val[0] = tcon->vol_serial_number;
	/* we use part of the create time for extra randomness, see statfs(2) */
272 buf->f_fsid.val[1] = (int)le64_to_cpu(tcon->vol_create_time);
274 buf->f_files = 0; /* undefined */
275 buf->f_ffree = 0; /* unlimited */
277 if (server->ops->queryfs)
278 rc = server->ops->queryfs(xid, tcon, buf);
284 static long cifs_fallocate(struct file *file, int mode, loff_t off, loff_t len)
286 struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
287 struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
288 struct TCP_Server_Info *server = tcon->ses->server;
290 if (server->ops->fallocate)
291 return server->ops->fallocate(file, tcon, mode, off, len);
296 static int cifs_permission(struct inode *inode, int mask)
298 struct cifs_sb_info *cifs_sb;
300 cifs_sb = CIFS_SB(inode->i_sb);
302 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) {
303 if ((mask & MAY_EXEC) && !execute_ok(inode))
	} else /* file mode might have been restricted at mount time
		on the client (above and beyond the ACL on the server) for
		servers which do not support setting and viewing mode bits,
		so allowing the client to check permissions is useful */
311 return generic_permission(inode, mask);
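/*
 * Usage note (hypothetical example): the CIFS_MOUNT_NO_PERM case above
 * corresponds to the "noperm" mount option, which skips client-side
 * permission checks (apart from the MAY_EXEC test) and leaves enforcement
 * to the server's ACLs:
 *
 *   mount -t cifs //server/share /mnt -o username=user,noperm
 */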
314 static struct kmem_cache *cifs_inode_cachep;
315 static struct kmem_cache *cifs_req_cachep;
316 static struct kmem_cache *cifs_mid_cachep;
317 static struct kmem_cache *cifs_sm_req_cachep;
318 mempool_t *cifs_sm_req_poolp;
319 mempool_t *cifs_req_poolp;
320 mempool_t *cifs_mid_poolp;
322 static struct inode *
323 cifs_alloc_inode(struct super_block *sb)
325 struct cifsInodeInfo *cifs_inode;
326 cifs_inode = kmem_cache_alloc(cifs_inode_cachep, GFP_KERNEL);
329 cifs_inode->cifsAttrs = 0x20; /* default */
330 cifs_inode->time = 0;
	/*
	 * Until the file is open and we have gotten oplock info back from the
	 * server, we cannot assume caching of file data or metadata.
	 */
335 cifs_set_oplock_level(cifs_inode, 0);
336 cifs_inode->flags = 0;
337 spin_lock_init(&cifs_inode->writers_lock);
338 cifs_inode->writers = 0;
339 cifs_inode->vfs_inode.i_blkbits = 14; /* 2**14 = CIFS_MAX_MSGSIZE */
340 cifs_inode->server_eof = 0;
341 cifs_inode->uniqueid = 0;
342 cifs_inode->createtime = 0;
343 cifs_inode->epoch = 0;
344 spin_lock_init(&cifs_inode->open_file_lock);
345 generate_random_uuid(cifs_inode->lease_key);
	/*
	 * Cannot set i_flags here - they get immediately overwritten to zero
	 * by the VFS.
	 */
351 /* cifs_inode->vfs_inode.i_flags = S_NOATIME | S_NOCMTIME; */
352 INIT_LIST_HEAD(&cifs_inode->openFileList);
353 INIT_LIST_HEAD(&cifs_inode->llist);
354 return &cifs_inode->vfs_inode;
358 cifs_free_inode(struct inode *inode)
360 kmem_cache_free(cifs_inode_cachep, CIFS_I(inode));
364 cifs_evict_inode(struct inode *inode)
366 truncate_inode_pages_final(&inode->i_data);
368 cifs_fscache_release_inode_cookie(inode);
372 cifs_show_address(struct seq_file *s, struct TCP_Server_Info *server)
374 struct sockaddr_in *sa = (struct sockaddr_in *) &server->dstaddr;
375 struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *) &server->dstaddr;
377 seq_puts(s, ",addr=");
379 switch (server->dstaddr.ss_family) {
381 seq_printf(s, "%pI4", &sa->sin_addr.s_addr);
384 seq_printf(s, "%pI6", &sa6->sin6_addr.s6_addr);
385 if (sa6->sin6_scope_id)
386 seq_printf(s, "%%%u", sa6->sin6_scope_id);
389 seq_puts(s, "(unknown)");
392 seq_puts(s, ",rdma");
396 cifs_show_security(struct seq_file *s, struct cifs_ses *ses)
398 if (ses->sectype == Unspecified) {
399 if (ses->user_name == NULL)
400 seq_puts(s, ",sec=none");
404 seq_puts(s, ",sec=");
406 switch (ses->sectype) {
408 seq_puts(s, "lanman");
411 seq_puts(s, "ntlmv2");
417 seq_printf(s, "krb5,cruid=%u", from_kuid_munged(&init_user_ns,ses->cred_uid));
420 seq_puts(s, "ntlmssp");
423 /* shouldn't ever happen */
424 seq_puts(s, "unknown");
433 cifs_show_cache_flavor(struct seq_file *s, struct cifs_sb_info *cifs_sb)
435 seq_puts(s, ",cache=");
437 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
438 seq_puts(s, "strict");
439 else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO)
441 else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RW_CACHE)
442 seq_puts(s, "singleclient"); /* assume only one client access */
443 else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RO_CACHE)
444 seq_puts(s, "ro"); /* read only caching assumed */
446 seq_puts(s, "loose");
450 cifs_show_nls(struct seq_file *s, struct nls_table *cur)
452 struct nls_table *def;
454 /* Display iocharset= option if it's not default charset */
455 def = load_nls_default();
457 seq_printf(s, ",iocharset=%s", cur->charset);
/*
 * cifs_show_options() is for displaying mount options in /proc/mounts.
 * Not all settable options are displayed but most of the important
 * ones are.
 */
467 cifs_show_options(struct seq_file *s, struct dentry *root)
469 struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
470 struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
471 struct sockaddr *srcaddr;
472 srcaddr = (struct sockaddr *)&tcon->ses->server->srcaddr;
474 seq_show_option(s, "vers", tcon->ses->server->vals->version_string);
475 cifs_show_security(s, tcon->ses);
476 cifs_show_cache_flavor(s, cifs_sb);
479 seq_puts(s, ",nolease");
480 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER)
481 seq_puts(s, ",multiuser");
482 else if (tcon->ses->user_name)
483 seq_show_option(s, "username", tcon->ses->user_name);
485 if (tcon->ses->domainName && tcon->ses->domainName[0] != 0)
486 seq_show_option(s, "domain", tcon->ses->domainName);
488 if (srcaddr->sa_family != AF_UNSPEC) {
489 struct sockaddr_in *saddr4;
490 struct sockaddr_in6 *saddr6;
491 saddr4 = (struct sockaddr_in *)srcaddr;
492 saddr6 = (struct sockaddr_in6 *)srcaddr;
493 if (srcaddr->sa_family == AF_INET6)
494 seq_printf(s, ",srcaddr=%pI6c",
496 else if (srcaddr->sa_family == AF_INET)
497 seq_printf(s, ",srcaddr=%pI4",
498 &saddr4->sin_addr.s_addr);
500 seq_printf(s, ",srcaddr=BAD-AF:%i",
501 (int)(srcaddr->sa_family));
504 seq_printf(s, ",uid=%u",
505 from_kuid_munged(&init_user_ns, cifs_sb->mnt_uid));
506 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID)
507 seq_puts(s, ",forceuid");
509 seq_puts(s, ",noforceuid");
511 seq_printf(s, ",gid=%u",
512 from_kgid_munged(&init_user_ns, cifs_sb->mnt_gid));
513 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID)
514 seq_puts(s, ",forcegid");
516 seq_puts(s, ",noforcegid");
518 cifs_show_address(s, tcon->ses->server);
521 seq_printf(s, ",file_mode=0%ho,dir_mode=0%ho",
522 cifs_sb->mnt_file_mode,
523 cifs_sb->mnt_dir_mode);
525 cifs_show_nls(s, cifs_sb->local_nls);
528 seq_puts(s, ",seal");
530 seq_puts(s, ",nocase");
531 if (tcon->local_lease)
532 seq_puts(s, ",locallease");
534 seq_puts(s, ",hard");
536 seq_puts(s, ",soft");
537 if (tcon->use_persistent)
538 seq_puts(s, ",persistenthandles");
539 else if (tcon->use_resilient)
540 seq_puts(s, ",resilienthandles");
541 if (tcon->posix_extensions)
542 seq_puts(s, ",posix");
543 else if (tcon->unix_ext)
544 seq_puts(s, ",unix");
546 seq_puts(s, ",nounix");
547 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS)
548 seq_puts(s, ",nodfs");
549 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)
550 seq_puts(s, ",posixpaths");
551 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID)
552 seq_puts(s, ",setuids");
553 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UID_FROM_ACL)
554 seq_puts(s, ",idsfromsid");
555 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
556 seq_puts(s, ",serverino");
557 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
558 seq_puts(s, ",rwpidforward");
559 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL)
560 seq_puts(s, ",forcemand");
561 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
562 seq_puts(s, ",nouser_xattr");
563 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR)
564 seq_puts(s, ",mapchars");
565 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SFM_CHR)
566 seq_puts(s, ",mapposix");
567 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)
569 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
570 seq_puts(s, ",nobrl");
571 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_HANDLE_CACHE)
572 seq_puts(s, ",nohandlecache");
573 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID)
574 seq_puts(s, ",modefromsid");
575 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL)
576 seq_puts(s, ",cifsacl");
577 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)
578 seq_puts(s, ",dynperm");
579 if (root->d_sb->s_flags & SB_POSIXACL)
581 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS)
582 seq_puts(s, ",mfsymlinks");
583 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_FSCACHE)
585 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)
586 seq_puts(s, ",nostrictsync");
587 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM)
588 seq_puts(s, ",noperm");
589 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID)
590 seq_printf(s, ",backupuid=%u",
591 from_kuid_munged(&init_user_ns,
592 cifs_sb->mnt_backupuid));
593 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID)
594 seq_printf(s, ",backupgid=%u",
595 from_kgid_munged(&init_user_ns,
596 cifs_sb->mnt_backupgid));
598 seq_printf(s, ",rsize=%u", cifs_sb->rsize);
599 seq_printf(s, ",wsize=%u", cifs_sb->wsize);
600 seq_printf(s, ",bsize=%u", cifs_sb->bsize);
601 if (tcon->ses->server->min_offload)
602 seq_printf(s, ",esize=%u", tcon->ses->server->min_offload);
603 seq_printf(s, ",echo_interval=%lu",
604 tcon->ses->server->echo_interval / HZ);
606 /* Only display max_credits if it was overridden on mount */
607 if (tcon->ses->server->max_credits != SMB2_MAX_CREDITS_AVAILABLE)
608 seq_printf(s, ",max_credits=%u", tcon->ses->server->max_credits);
610 if (tcon->snapshot_time)
611 seq_printf(s, ",snapshot=%llu", tcon->snapshot_time);
612 if (tcon->handle_timeout)
613 seq_printf(s, ",handletimeout=%u", tcon->handle_timeout);
614 /* convert actimeo and display it in seconds */
615 seq_printf(s, ",actimeo=%lu", cifs_sb->actimeo / HZ);
617 if (tcon->ses->chan_max > 1)
618 seq_printf(s, ",multichannel,max_channel=%zu",
619 tcon->ses->chan_max);
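/*
 * Illustrative example (all values hypothetical, wrapped for readability) of
 * the kind of line the code above produces in /proc/mounts:
 *
 *   //server/share /mnt cifs rw,vers=3.1.1,sec=ntlmssp,cache=strict,
 *     username=user,uid=1000,forceuid,gid=1000,forcegid,addr=192.168.1.10,
 *     file_mode=0755,dir_mode=0755,soft,nounix,serverino,mapposix,
 *     rsize=4194304,wsize=4194304,bsize=1048576,echo_interval=60,actimeo=1 0 0
 */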
624 static void cifs_umount_begin(struct super_block *sb)
626 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
627 struct cifs_tcon *tcon;
632 tcon = cifs_sb_master_tcon(cifs_sb);
634 spin_lock(&cifs_tcp_ses_lock);
635 if ((tcon->tc_count > 1) || (tcon->tidStatus == CifsExiting)) {
636 /* we have other mounts to same share or we have
637 already tried to force umount this and woken up
638 all waiting network requests, nothing to do */
639 spin_unlock(&cifs_tcp_ses_lock);
641 } else if (tcon->tc_count == 1)
642 tcon->tidStatus = CifsExiting;
643 spin_unlock(&cifs_tcp_ses_lock);
645 /* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
646 /* cancel_notify_requests(tcon); */
647 if (tcon->ses && tcon->ses->server) {
648 cifs_dbg(FYI, "wake up tasks now - umount begin not complete\n");
649 wake_up_all(&tcon->ses->server->request_q);
650 wake_up_all(&tcon->ses->server->response_q);
651 msleep(1); /* yield */
652 /* we have to kick the requests once more */
653 wake_up_all(&tcon->ses->server->response_q);
660 #ifdef CONFIG_CIFS_STATS2
661 static int cifs_show_stats(struct seq_file *s, struct dentry *root)
668 static int cifs_remount(struct super_block *sb, int *flags, char *data)
671 *flags |= SB_NODIRATIME;
675 static int cifs_drop_inode(struct inode *inode)
677 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
679 /* no serverino => unconditional eviction */
680 return !(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) ||
681 generic_drop_inode(inode);
684 static const struct super_operations cifs_super_ops = {
685 .statfs = cifs_statfs,
686 .alloc_inode = cifs_alloc_inode,
687 .free_inode = cifs_free_inode,
688 .drop_inode = cifs_drop_inode,
689 .evict_inode = cifs_evict_inode,
690 /* .delete_inode = cifs_delete_inode, */ /* Do not need above
691 function unless later we add lazy close of inodes or unless the
	kernel forgets to call us with the same number of releases (closes)
	as opens */
694 .show_options = cifs_show_options,
695 .umount_begin = cifs_umount_begin,
696 .remount_fs = cifs_remount,
697 #ifdef CONFIG_CIFS_STATS2
698 .show_stats = cifs_show_stats,
/*
 * Get root dentry from superblock according to prefix path mount option.
 * Return dentry with refcount + 1 on success and an ERR_PTR otherwise.
 */
706 static struct dentry *
707 cifs_get_root(struct smb_vol *vol, struct super_block *sb)
709 struct dentry *dentry;
710 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
711 char *full_path = NULL;
715 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)
716 return dget(sb->s_root);
718 full_path = cifs_build_path_to_root(vol, cifs_sb,
719 cifs_sb_master_tcon(cifs_sb), 0);
720 if (full_path == NULL)
721 return ERR_PTR(-ENOMEM);
723 cifs_dbg(FYI, "Get root dentry for %s\n", full_path);
725 sep = CIFS_DIR_SEP(cifs_sb);
726 dentry = dget(sb->s_root);
730 struct inode *dir = d_inode(dentry);
731 struct dentry *child;
733 if (!S_ISDIR(dir->i_mode)) {
735 dentry = ERR_PTR(-ENOTDIR);
739 /* skip separators */
746 while (*s && *s != sep)
749 child = lookup_positive_unlocked(p, dentry, s - p);
752 } while (!IS_ERR(dentry));
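	/*
	 * Worked example (hypothetical mount): for //server/share/dir1/dir2
	 * the prefix path is "dir1/dir2" (or "dir1\dir2" with backslash
	 * separators), so the loop above starts from the share root dentry,
	 * looks up "dir1", then looks up "dir2" beneath it, and that dentry
	 * is returned as the root of this mount.
	 */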
757 static int cifs_set_super(struct super_block *sb, void *data)
759 struct cifs_mnt_data *mnt_data = data;
760 sb->s_fs_info = mnt_data->cifs_sb;
761 return set_anon_super(sb, NULL);
764 static struct dentry *
765 cifs_smb3_do_mount(struct file_system_type *fs_type,
766 int flags, const char *dev_name, void *data, bool is_smb3)
769 struct super_block *sb;
770 struct cifs_sb_info *cifs_sb;
771 struct smb_vol *volume_info;
772 struct cifs_mnt_data mnt_data;
	/*
	 * Prints in Kernel / CIFS log the attempted mount operation
	 *	if CIFS_DEBUG && cifs_FYI
	 */
780 cifs_dbg(FYI, "Devname: %s flags: %d\n", dev_name, flags);
782 cifs_info("Attempting to mount %s\n", dev_name);
784 volume_info = cifs_get_volume_info((char *)data, dev_name, is_smb3);
785 if (IS_ERR(volume_info))
786 return ERR_CAST(volume_info);
788 cifs_sb = kzalloc(sizeof(struct cifs_sb_info), GFP_KERNEL);
789 if (cifs_sb == NULL) {
790 root = ERR_PTR(-ENOMEM);
794 cifs_sb->mountdata = kstrndup(data, PAGE_SIZE, GFP_KERNEL);
795 if (cifs_sb->mountdata == NULL) {
796 root = ERR_PTR(-ENOMEM);
800 rc = cifs_setup_cifs_sb(volume_info, cifs_sb);
806 rc = cifs_mount(cifs_sb, volume_info);
808 if (!(flags & SB_SILENT))
809 cifs_dbg(VFS, "cifs_mount failed w/return code = %d\n",
815 mnt_data.vol = volume_info;
816 mnt_data.cifs_sb = cifs_sb;
817 mnt_data.flags = flags;
819 /* BB should we make this contingent on mount parm? */
820 flags |= SB_NODIRATIME | SB_NOATIME;
822 sb = sget(fs_type, cifs_match_super, cifs_set_super, flags, &mnt_data);
825 cifs_umount(cifs_sb);
830 cifs_dbg(FYI, "Use existing superblock\n");
831 cifs_umount(cifs_sb);
833 rc = cifs_read_super(sb);
839 sb->s_flags |= SB_ACTIVE;
842 root = cifs_get_root(volume_info, sb);
846 cifs_dbg(FYI, "dentry root is: %p\n", root);
850 deactivate_locked_super(sb);
852 cifs_cleanup_volume_info(volume_info);
856 kfree(cifs_sb->prepath);
857 kfree(cifs_sb->mountdata);
860 unload_nls(volume_info->local_nls);
864 static struct dentry *
865 smb3_do_mount(struct file_system_type *fs_type,
866 int flags, const char *dev_name, void *data)
868 return cifs_smb3_do_mount(fs_type, flags, dev_name, data, true);
871 static struct dentry *
872 cifs_do_mount(struct file_system_type *fs_type,
873 int flags, const char *dev_name, void *data)
875 return cifs_smb3_do_mount(fs_type, flags, dev_name, data, false);
879 cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter)
882 struct inode *inode = file_inode(iocb->ki_filp);
884 if (iocb->ki_filp->f_flags & O_DIRECT)
885 return cifs_user_readv(iocb, iter);
887 rc = cifs_revalidate_mapping(inode);
891 return generic_file_read_iter(iocb, iter);
894 static ssize_t cifs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
896 struct inode *inode = file_inode(iocb->ki_filp);
897 struct cifsInodeInfo *cinode = CIFS_I(inode);
901 if (iocb->ki_filp->f_flags & O_DIRECT) {
902 written = cifs_user_writev(iocb, from);
903 if (written > 0 && CIFS_CACHE_READ(cinode)) {
904 cifs_zap_mapping(inode);
906 "Set no oplock for inode=%p after a write operation\n",
913 written = cifs_get_writer(cinode);
917 written = generic_file_write_iter(iocb, from);
919 if (CIFS_CACHE_WRITE(CIFS_I(inode)))
922 rc = filemap_fdatawrite(inode->i_mapping);
924 cifs_dbg(FYI, "cifs_file_write_iter: %d rc on %p inode\n",
928 cifs_put_writer(cinode);
932 static loff_t cifs_llseek(struct file *file, loff_t offset, int whence)
934 struct cifsFileInfo *cfile = file->private_data;
935 struct cifs_tcon *tcon;
938 * whence == SEEK_END || SEEK_DATA || SEEK_HOLE => we must revalidate
939 * the cached file length
941 if (whence != SEEK_SET && whence != SEEK_CUR) {
943 struct inode *inode = file_inode(file);
946 * We need to be sure that all dirty pages are written and the
947 * server has the newest file length.
949 if (!CIFS_CACHE_READ(CIFS_I(inode)) && inode->i_mapping &&
950 inode->i_mapping->nrpages != 0) {
951 rc = filemap_fdatawait(inode->i_mapping);
953 mapping_set_error(inode->i_mapping, rc);
958 * Some applications poll for the file length in this strange
959 * way so we must seek to end on non-oplocked files by
960 * setting the revalidate time to zero.
962 CIFS_I(inode)->time = 0;
964 rc = cifs_revalidate_file_attr(file);
968 if (cfile && cfile->tlink) {
969 tcon = tlink_tcon(cfile->tlink);
970 if (tcon->ses->server->ops->llseek)
971 return tcon->ses->server->ops->llseek(file, tcon,
974 return generic_file_llseek(file, offset, whence);
978 cifs_setlease(struct file *file, long arg, struct file_lock **lease, void **priv)
	/*
	 * Note that this is called by vfs setlease with i_lock held to
	 * protect *lease from going away.
	 */
984 struct inode *inode = file_inode(file);
985 struct cifsFileInfo *cfile = file->private_data;
987 if (!(S_ISREG(inode->i_mode)))
990 /* Check if file is oplocked if this is request for new lease */
991 if (arg == F_UNLCK ||
992 ((arg == F_RDLCK) && CIFS_CACHE_READ(CIFS_I(inode))) ||
993 ((arg == F_WRLCK) && CIFS_CACHE_WRITE(CIFS_I(inode))))
994 return generic_setlease(file, arg, lease, priv);
995 else if (tlink_tcon(cfile->tlink)->local_lease &&
996 !CIFS_CACHE_READ(CIFS_I(inode)))
		/*
		 * If the server claims to support oplock on this file, then we
		 * still need to check oplock even if the local_lease mount
		 * option is set, but there are servers which do not support
		 * oplock for which this mount option may be useful if the user
		 * knows that the file won't be changed on the server by anyone
		 * else.
		 */
1005 return generic_setlease(file, arg, lease, priv);
1010 struct file_system_type cifs_fs_type = {
1011 .owner = THIS_MODULE,
1013 .mount = cifs_do_mount,
1014 .kill_sb = cifs_kill_sb,
1017 MODULE_ALIAS_FS("cifs");
1019 static struct file_system_type smb3_fs_type = {
1020 .owner = THIS_MODULE,
1022 .mount = smb3_do_mount,
1023 .kill_sb = cifs_kill_sb,
1026 MODULE_ALIAS_FS("smb3");
1027 MODULE_ALIAS("smb3");
1029 const struct inode_operations cifs_dir_inode_ops = {
1030 .create = cifs_create,
1031 .atomic_open = cifs_atomic_open,
1032 .lookup = cifs_lookup,
1033 .getattr = cifs_getattr,
1034 .unlink = cifs_unlink,
1035 .link = cifs_hardlink,
1036 .mkdir = cifs_mkdir,
1037 .rmdir = cifs_rmdir,
1038 .rename = cifs_rename2,
1039 .permission = cifs_permission,
1040 .setattr = cifs_setattr,
1041 .symlink = cifs_symlink,
1042 .mknod = cifs_mknod,
1043 .listxattr = cifs_listxattr,
1046 const struct inode_operations cifs_file_inode_ops = {
1047 .setattr = cifs_setattr,
1048 .getattr = cifs_getattr,
1049 .permission = cifs_permission,
1050 .listxattr = cifs_listxattr,
1051 .fiemap = cifs_fiemap,
1054 const struct inode_operations cifs_symlink_inode_ops = {
1055 .get_link = cifs_get_link,
1056 .permission = cifs_permission,
1057 .listxattr = cifs_listxattr,
1060 static loff_t cifs_remap_file_range(struct file *src_file, loff_t off,
1061 struct file *dst_file, loff_t destoff, loff_t len,
1062 unsigned int remap_flags)
1064 struct inode *src_inode = file_inode(src_file);
1065 struct inode *target_inode = file_inode(dst_file);
1066 struct cifsFileInfo *smb_file_src = src_file->private_data;
1067 struct cifsFileInfo *smb_file_target;
1068 struct cifs_tcon *target_tcon;
1072 if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
1075 cifs_dbg(FYI, "clone range\n");
1079 if (!src_file->private_data || !dst_file->private_data) {
1081 cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
1085 smb_file_target = dst_file->private_data;
1086 target_tcon = tlink_tcon(smb_file_target->tlink);
	/*
	 * Note: the cifs case is easier than btrfs since the server is
	 * responsible for checking for proper open modes and file type,
	 * and if it wants the server could even support copy of a range
	 * where source == target.
	 */
1093 lock_two_nondirectories(target_inode, src_inode);
1096 len = src_inode->i_size - off;
1098 cifs_dbg(FYI, "about to flush pages\n");
	/* should we flush the first and last pages first? */
1100 truncate_inode_pages_range(&target_inode->i_data, destoff,
1101 PAGE_ALIGN(destoff + len)-1);
1103 if (target_tcon->ses->server->ops->duplicate_extents)
1104 rc = target_tcon->ses->server->ops->duplicate_extents(xid,
1105 smb_file_src, smb_file_target, off, len, destoff);
1109 /* force revalidate of size and timestamps of target file now
1110 that target is updated on the server */
1111 CIFS_I(target_inode)->time = 0;
1112 /* although unlocking in the reverse order from locking is not
1113 strictly necessary here it is a little cleaner to be consistent */
1114 unlock_two_nondirectories(src_inode, target_inode);
1117 return rc < 0 ? rc : len;
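/*
 * Usage sketch (userspace, hypothetical paths): ->remap_file_range above is
 * typically reached through the FICLONE/FICLONERANGE ioctls; on SMB3 shares
 * this maps to a server-side duplicate-extents operation when the server
 * supports it:
 *
 *   int src = open("/mnt/share/big.img", O_RDONLY);
 *   int dst = open("/mnt/share/clone.img", O_WRONLY | O_CREAT, 0644);
 *   if (ioctl(dst, FICLONE, src) == -1)
 *           perror("FICLONE");   (fails with EOPNOTSUPP if not supported)
 */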
1120 ssize_t cifs_file_copychunk_range(unsigned int xid,
1121 struct file *src_file, loff_t off,
1122 struct file *dst_file, loff_t destoff,
1123 size_t len, unsigned int flags)
1125 struct inode *src_inode = file_inode(src_file);
1126 struct inode *target_inode = file_inode(dst_file);
1127 struct cifsFileInfo *smb_file_src;
1128 struct cifsFileInfo *smb_file_target;
1129 struct cifs_tcon *src_tcon;
1130 struct cifs_tcon *target_tcon;
1133 cifs_dbg(FYI, "copychunk range\n");
1135 if (!src_file->private_data || !dst_file->private_data) {
1137 cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
1142 smb_file_target = dst_file->private_data;
1143 smb_file_src = src_file->private_data;
1144 src_tcon = tlink_tcon(smb_file_src->tlink);
1145 target_tcon = tlink_tcon(smb_file_target->tlink);
1147 if (src_tcon->ses != target_tcon->ses) {
1148 cifs_dbg(VFS, "source and target of copy not on same server\n");
1153 if (!target_tcon->ses->server->ops->copychunk_range)
	/*
	 * Note: the cifs case is easier than btrfs since the server is
	 * responsible for checking for proper open modes and file type,
	 * and if it wants the server could even support copy of a range
	 * where source == target.
	 */
1161 lock_two_nondirectories(target_inode, src_inode);
1163 cifs_dbg(FYI, "about to flush pages\n");
	/* should we flush the first and last pages first? */
1165 truncate_inode_pages(&target_inode->i_data, 0);
1167 rc = file_modified(dst_file);
1169 rc = target_tcon->ses->server->ops->copychunk_range(xid,
1170 smb_file_src, smb_file_target, off, len, destoff);
1172 file_accessed(src_file);
1174 /* force revalidate of size and timestamps of target file now
	 * that target is updated on the server
	 */
1177 CIFS_I(target_inode)->time = 0;
1178 /* although unlocking in the reverse order from locking is not
	 * strictly necessary here it is a little cleaner to be consistent
	 */
1181 unlock_two_nondirectories(src_inode, target_inode);
1188 * Directory operations under CIFS/SMB2/SMB3 are synchronous, so fsync()
1189 * is a dummy operation.
1191 static int cifs_dir_fsync(struct file *file, loff_t start, loff_t end, int datasync)
1193 cifs_dbg(FYI, "Sync directory - name: %pD datasync: 0x%x\n",
1199 static ssize_t cifs_copy_file_range(struct file *src_file, loff_t off,
1200 struct file *dst_file, loff_t destoff,
1201 size_t len, unsigned int flags)
1203 unsigned int xid = get_xid();
1206 rc = cifs_file_copychunk_range(xid, src_file, off, dst_file, destoff,
1210 if (rc == -EOPNOTSUPP || rc == -EXDEV)
1211 rc = generic_copy_file_range(src_file, off, dst_file,
1212 destoff, len, flags);
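	/*
	 * Usage sketch (userspace, hypothetical paths): copy_file_range(2)
	 * between two files on the same SMB mount lets the code above offload
	 * the copy to the server via the copychunk path, falling back to a
	 * generic client-side copy on -EOPNOTSUPP or -EXDEV. Real code should
	 * loop, since the call may copy fewer bytes than requested:
	 *
	 *   int in  = open("/mnt/share/src.bin", O_RDONLY);
	 *   int out = open("/mnt/share/dst.bin", O_WRONLY | O_CREAT, 0644);
	 *   ssize_t n = copy_file_range(in, NULL, out, NULL, 1048576, 0);
	 */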
1216 const struct file_operations cifs_file_ops = {
1217 .read_iter = cifs_loose_read_iter,
1218 .write_iter = cifs_file_write_iter,
1220 .release = cifs_close,
1222 .flock = cifs_flock,
1223 .fsync = cifs_fsync,
1224 .flush = cifs_flush,
1225 .mmap = cifs_file_mmap,
1226 .splice_read = generic_file_splice_read,
1227 .splice_write = iter_file_splice_write,
1228 .llseek = cifs_llseek,
1229 .unlocked_ioctl = cifs_ioctl,
1230 .copy_file_range = cifs_copy_file_range,
1231 .remap_file_range = cifs_remap_file_range,
1232 .setlease = cifs_setlease,
1233 .fallocate = cifs_fallocate,
1236 const struct file_operations cifs_file_strict_ops = {
1237 .read_iter = cifs_strict_readv,
1238 .write_iter = cifs_strict_writev,
1240 .release = cifs_close,
1242 .flock = cifs_flock,
1243 .fsync = cifs_strict_fsync,
1244 .flush = cifs_flush,
1245 .mmap = cifs_file_strict_mmap,
1246 .splice_read = generic_file_splice_read,
1247 .splice_write = iter_file_splice_write,
1248 .llseek = cifs_llseek,
1249 .unlocked_ioctl = cifs_ioctl,
1250 .copy_file_range = cifs_copy_file_range,
1251 .remap_file_range = cifs_remap_file_range,
1252 .setlease = cifs_setlease,
1253 .fallocate = cifs_fallocate,
1256 const struct file_operations cifs_file_direct_ops = {
1257 .read_iter = cifs_direct_readv,
1258 .write_iter = cifs_direct_writev,
1260 .release = cifs_close,
1262 .flock = cifs_flock,
1263 .fsync = cifs_fsync,
1264 .flush = cifs_flush,
1265 .mmap = cifs_file_mmap,
1266 .splice_read = generic_file_splice_read,
1267 .splice_write = iter_file_splice_write,
1268 .unlocked_ioctl = cifs_ioctl,
1269 .copy_file_range = cifs_copy_file_range,
1270 .remap_file_range = cifs_remap_file_range,
1271 .llseek = cifs_llseek,
1272 .setlease = cifs_setlease,
1273 .fallocate = cifs_fallocate,
1276 const struct file_operations cifs_file_nobrl_ops = {
1277 .read_iter = cifs_loose_read_iter,
1278 .write_iter = cifs_file_write_iter,
1280 .release = cifs_close,
1281 .fsync = cifs_fsync,
1282 .flush = cifs_flush,
1283 .mmap = cifs_file_mmap,
1284 .splice_read = generic_file_splice_read,
1285 .splice_write = iter_file_splice_write,
1286 .llseek = cifs_llseek,
1287 .unlocked_ioctl = cifs_ioctl,
1288 .copy_file_range = cifs_copy_file_range,
1289 .remap_file_range = cifs_remap_file_range,
1290 .setlease = cifs_setlease,
1291 .fallocate = cifs_fallocate,
1294 const struct file_operations cifs_file_strict_nobrl_ops = {
1295 .read_iter = cifs_strict_readv,
1296 .write_iter = cifs_strict_writev,
1298 .release = cifs_close,
1299 .fsync = cifs_strict_fsync,
1300 .flush = cifs_flush,
1301 .mmap = cifs_file_strict_mmap,
1302 .splice_read = generic_file_splice_read,
1303 .splice_write = iter_file_splice_write,
1304 .llseek = cifs_llseek,
1305 .unlocked_ioctl = cifs_ioctl,
1306 .copy_file_range = cifs_copy_file_range,
1307 .remap_file_range = cifs_remap_file_range,
1308 .setlease = cifs_setlease,
1309 .fallocate = cifs_fallocate,
1312 const struct file_operations cifs_file_direct_nobrl_ops = {
1313 .read_iter = cifs_direct_readv,
1314 .write_iter = cifs_direct_writev,
1316 .release = cifs_close,
1317 .fsync = cifs_fsync,
1318 .flush = cifs_flush,
1319 .mmap = cifs_file_mmap,
1320 .splice_read = generic_file_splice_read,
1321 .splice_write = iter_file_splice_write,
1322 .unlocked_ioctl = cifs_ioctl,
1323 .copy_file_range = cifs_copy_file_range,
1324 .remap_file_range = cifs_remap_file_range,
1325 .llseek = cifs_llseek,
1326 .setlease = cifs_setlease,
1327 .fallocate = cifs_fallocate,
1330 const struct file_operations cifs_dir_ops = {
1331 .iterate_shared = cifs_readdir,
1332 .release = cifs_closedir,
1333 .read = generic_read_dir,
1334 .unlocked_ioctl = cifs_ioctl,
1335 .copy_file_range = cifs_copy_file_range,
1336 .remap_file_range = cifs_remap_file_range,
1337 .llseek = generic_file_llseek,
1338 .fsync = cifs_dir_fsync,
1342 cifs_init_once(void *inode)
1344 struct cifsInodeInfo *cifsi = inode;
1346 inode_init_once(&cifsi->vfs_inode);
1347 init_rwsem(&cifsi->lock_sem);
1351 cifs_init_inodecache(void)
1353 cifs_inode_cachep = kmem_cache_create("cifs_inode_cache",
1354 sizeof(struct cifsInodeInfo),
1355 0, (SLAB_RECLAIM_ACCOUNT|
1356 SLAB_MEM_SPREAD|SLAB_ACCOUNT),
1358 if (cifs_inode_cachep == NULL)
1365 cifs_destroy_inodecache(void)
1368 * Make sure all delayed rcu free inodes are flushed before we
1372 kmem_cache_destroy(cifs_inode_cachep);
1376 cifs_init_request_bufs(void)
	/*
	 * The SMB2 maximum header size is bigger than the CIFS one, so it
	 * does no harm to allocate a few more bytes for CIFS requests.
	 */
1382 size_t max_hdr_size = MAX_SMB2_HDR_SIZE;
1384 if (CIFSMaxBufSize < 8192) {
		/* Buffer size cannot be smaller than 2 * PATH_MAX since the
		   maximum Unicode path name has to fit in any SMB/CIFS
		   path-based frame */
1387 CIFSMaxBufSize = 8192;
1388 } else if (CIFSMaxBufSize > 1024*127) {
1389 CIFSMaxBufSize = 1024 * 127;
	CIFSMaxBufSize &= 0x1FE00; /* round size down to an even 512-byte multiple */
1394 cifs_dbg(VFS, "CIFSMaxBufSize %d 0x%x\n",
1395 CIFSMaxBufSize, CIFSMaxBufSize);
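	/*
	 * Worked example of the clamping and rounding above (value is
	 * hypothetical): a module parameter of 70000 (0x11170) lies within
	 * 8192..130048 and so is only masked with 0x1FE00, which clears the
	 * low nine bits and rounds it down to 69632 (0x11000), the nearest
	 * lower multiple of 512.
	 */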
1397 cifs_req_cachep = kmem_cache_create_usercopy("cifs_request",
1398 CIFSMaxBufSize + max_hdr_size, 0,
1399 SLAB_HWCACHE_ALIGN, 0,
1400 CIFSMaxBufSize + max_hdr_size,
1402 if (cifs_req_cachep == NULL)
1405 if (cifs_min_rcv < 1)
1407 else if (cifs_min_rcv > 64) {
1409 cifs_dbg(VFS, "cifs_min_rcv set to maximum (64)\n");
1412 cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv,
1415 if (cifs_req_poolp == NULL) {
1416 kmem_cache_destroy(cifs_req_cachep);
1419 /* MAX_CIFS_SMALL_BUFFER_SIZE bytes is enough for most SMB responses and
1420 almost all handle based requests (but not write response, nor is it
1421 sufficient for path based requests). A smaller size would have
1422 been more efficient (compacting multiple slab items on one 4k page)
1423 for the case in which debug was on, but this larger size allows
1424 more SMBs to use small buffer alloc and is still much more
1425 efficient to alloc 1 per page off the slab compared to 17K (5page)
1426 alloc of large cifs buffers even when page debugging is on */
1427 cifs_sm_req_cachep = kmem_cache_create_usercopy("cifs_small_rq",
1428 MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
1429 0, MAX_CIFS_SMALL_BUFFER_SIZE, NULL);
1430 if (cifs_sm_req_cachep == NULL) {
1431 mempool_destroy(cifs_req_poolp);
1432 kmem_cache_destroy(cifs_req_cachep);
1436 if (cifs_min_small < 2)
1438 else if (cifs_min_small > 256) {
1439 cifs_min_small = 256;
1440 cifs_dbg(FYI, "cifs_min_small set to maximum (256)\n");
1443 cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small,
1444 cifs_sm_req_cachep);
1446 if (cifs_sm_req_poolp == NULL) {
1447 mempool_destroy(cifs_req_poolp);
1448 kmem_cache_destroy(cifs_req_cachep);
1449 kmem_cache_destroy(cifs_sm_req_cachep);
1457 cifs_destroy_request_bufs(void)
1459 mempool_destroy(cifs_req_poolp);
1460 kmem_cache_destroy(cifs_req_cachep);
1461 mempool_destroy(cifs_sm_req_poolp);
1462 kmem_cache_destroy(cifs_sm_req_cachep);
1466 cifs_init_mids(void)
1468 cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids",
1469 sizeof(struct mid_q_entry), 0,
1470 SLAB_HWCACHE_ALIGN, NULL);
1471 if (cifs_mid_cachep == NULL)
1474 /* 3 is a reasonable minimum number of simultaneous operations */
1475 cifs_mid_poolp = mempool_create_slab_pool(3, cifs_mid_cachep);
1476 if (cifs_mid_poolp == NULL) {
1477 kmem_cache_destroy(cifs_mid_cachep);
1485 cifs_destroy_mids(void)
1487 mempool_destroy(cifs_mid_poolp);
1488 kmem_cache_destroy(cifs_mid_cachep);
1496 INIT_LIST_HEAD(&cifs_tcp_ses_list);
1497 #ifdef CONFIG_CIFS_DNOTIFY_EXPERIMENTAL /* unused temporarily */
1498 INIT_LIST_HEAD(&GlobalDnotifyReqList);
1499 INIT_LIST_HEAD(&GlobalDnotifyRsp_Q);
1500 #endif /* was needed for dnotify, and will be needed for inotify when VFS fix */
1502 * Initialize Global counters
1504 atomic_set(&sesInfoAllocCount, 0);
1505 atomic_set(&tconInfoAllocCount, 0);
1506 atomic_set(&tcpSesAllocCount, 0);
1507 atomic_set(&tcpSesReconnectCount, 0);
1508 atomic_set(&tconInfoReconnectCount, 0);
1510 atomic_set(&bufAllocCount, 0);
1511 atomic_set(&smBufAllocCount, 0);
1512 #ifdef CONFIG_CIFS_STATS2
1513 atomic_set(&totBufAllocCount, 0);
1514 atomic_set(&totSmBufAllocCount, 0);
1515 if (slow_rsp_threshold < 1)
1516 cifs_dbg(FYI, "slow_response_threshold msgs disabled\n");
1517 else if (slow_rsp_threshold > 32767)
1519 "slow response threshold set higher than recommended (0 to 32767)\n");
1520 #endif /* CONFIG_CIFS_STATS2 */
1522 atomic_set(&midCount, 0);
1523 GlobalCurrentXid = 0;
1524 GlobalTotalActiveXid = 0;
1525 GlobalMaxActiveXid = 0;
1526 spin_lock_init(&cifs_tcp_ses_lock);
1527 spin_lock_init(&GlobalMid_Lock);
1529 cifs_lock_secret = get_random_u32();
1531 if (cifs_max_pending < 2) {
1532 cifs_max_pending = 2;
1533 cifs_dbg(FYI, "cifs_max_pending set to min of 2\n");
1534 } else if (cifs_max_pending > CIFS_MAX_REQ) {
1535 cifs_max_pending = CIFS_MAX_REQ;
1536 cifs_dbg(FYI, "cifs_max_pending set to max of %u\n",
1540 cifsiod_wq = alloc_workqueue("cifsiod", WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1543 goto out_clean_proc;
	/*
	 * Consider in the future setting limit != 0, maybe to
	 * min(num_of_cores - 1, 3), so that we don't launch too many worker
	 * threads, but Documentation/core-api/workqueue.rst recommends
	 * setting it to 0.
	 */
1552 /* WQ_UNBOUND allows decrypt tasks to run on any CPU */
1553 decrypt_wq = alloc_workqueue("smb3decryptd",
1554 WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1557 goto out_destroy_cifsiod_wq;
1560 fileinfo_put_wq = alloc_workqueue("cifsfileinfoput",
1561 WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1562 if (!fileinfo_put_wq) {
1564 goto out_destroy_decrypt_wq;
1567 cifsoplockd_wq = alloc_workqueue("cifsoplockd",
1568 WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1569 if (!cifsoplockd_wq) {
1571 goto out_destroy_fileinfo_put_wq;
1574 rc = cifs_fscache_register();
1576 goto out_destroy_cifsoplockd_wq;
1578 rc = cifs_init_inodecache();
1580 goto out_unreg_fscache;
1582 rc = cifs_init_mids();
1584 goto out_destroy_inodecache;
1586 rc = cifs_init_request_bufs();
1588 goto out_destroy_mids;
1590 #ifdef CONFIG_CIFS_DFS_UPCALL
1591 rc = dfs_cache_init();
1593 goto out_destroy_request_bufs;
1594 #endif /* CONFIG_CIFS_DFS_UPCALL */
1595 #ifdef CONFIG_CIFS_UPCALL
1596 rc = init_cifs_spnego();
1598 goto out_destroy_dfs_cache;
1599 #endif /* CONFIG_CIFS_UPCALL */
1601 rc = init_cifs_idmap();
1603 goto out_register_key_type;
1605 rc = register_filesystem(&cifs_fs_type);
1607 goto out_init_cifs_idmap;
1609 rc = register_filesystem(&smb3_fs_type);
1611 unregister_filesystem(&cifs_fs_type);
1612 goto out_init_cifs_idmap;
1617 out_init_cifs_idmap:
1619 out_register_key_type:
1620 #ifdef CONFIG_CIFS_UPCALL
1622 out_destroy_dfs_cache:
1624 #ifdef CONFIG_CIFS_DFS_UPCALL
1625 dfs_cache_destroy();
1626 out_destroy_request_bufs:
1628 cifs_destroy_request_bufs();
1630 cifs_destroy_mids();
1631 out_destroy_inodecache:
1632 cifs_destroy_inodecache();
1634 cifs_fscache_unregister();
1635 out_destroy_cifsoplockd_wq:
1636 destroy_workqueue(cifsoplockd_wq);
1637 out_destroy_fileinfo_put_wq:
1638 destroy_workqueue(fileinfo_put_wq);
1639 out_destroy_decrypt_wq:
1640 destroy_workqueue(decrypt_wq);
1641 out_destroy_cifsiod_wq:
1642 destroy_workqueue(cifsiod_wq);
1651 cifs_dbg(NOISY, "exit_smb3\n");
1652 unregister_filesystem(&cifs_fs_type);
1653 unregister_filesystem(&smb3_fs_type);
1654 cifs_dfs_release_automount_timer();
1656 #ifdef CONFIG_CIFS_UPCALL
1659 #ifdef CONFIG_CIFS_DFS_UPCALL
1660 dfs_cache_destroy();
1662 cifs_destroy_request_bufs();
1663 cifs_destroy_mids();
1664 cifs_destroy_inodecache();
1665 cifs_fscache_unregister();
1666 destroy_workqueue(cifsoplockd_wq);
1667 destroy_workqueue(decrypt_wq);
1668 destroy_workqueue(fileinfo_put_wq);
1669 destroy_workqueue(cifsiod_wq);
1673 MODULE_AUTHOR("Steve French");
1674 MODULE_LICENSE("GPL"); /* combination of LGPL + GPL source behaves as GPL */
1676 ("VFS to access SMB3 servers e.g. Samba, Macs, Azure and Windows (and "
1677 "also older servers complying with the SNIA CIFS Specification)");
1678 MODULE_VERSION(CIFS_VERSION);
1679 MODULE_SOFTDEP("ecb");
1680 MODULE_SOFTDEP("hmac");
1681 MODULE_SOFTDEP("md4");
1682 MODULE_SOFTDEP("md5");
1683 MODULE_SOFTDEP("nls");
1684 MODULE_SOFTDEP("aes");
1685 MODULE_SOFTDEP("cmac");
1686 MODULE_SOFTDEP("sha256");
1687 MODULE_SOFTDEP("sha512");
1688 MODULE_SOFTDEP("aead2");
1689 MODULE_SOFTDEP("ccm");
1690 MODULE_SOFTDEP("gcm");
1691 module_init(init_cifs)
1692 module_exit(exit_cifs)