// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *   Copyright (C) 2016 Namjae Jeon <linkinjeon@kernel.org>
 *   Copyright (C) 2019 Samsung Electronics Co., Ltd.
 */

#include <linux/fs.h>
#include <linux/filelock.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include "glob.h"
#include "vfs_cache.h"
#include "oplock.h"
#include "vfs.h"
#include "connection.h"
#include "mgmt/tree_connect.h"
#include "mgmt/user_session.h"
#include "smb_common.h"

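/*
 * Deletion state kept in ksmbd_inode->m_flags.  S_DEL_PENDING means deletion
 * has been requested and is pending, S_DEL_ON_CLS means the file is unlinked
 * when the last handle is closed, and S_DEL_ON_CLS_STREAM means only the
 * xattr-backed named stream is removed on close.
 */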
#define S_DEL_PENDING			1
#define S_DEL_ON_CLS			2
#define S_DEL_ON_CLS_STREAM		8

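/*
 * The inode_hash* variables implement a hash table of ksmbd_inode objects
 * keyed by VFS inode.  global_ft holds every fp that has a persistent id
 * (durable opens) and is shared across connections; ordinary opens live in
 * the owning session's ksmbd_file_table.  fd_limit caps how many volatile
 * ids the server hands out, and filp_cache is the slab cache backing
 * struct ksmbd_file.
 */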
static unsigned int inode_hash_mask __read_mostly;
static unsigned int inode_hash_shift __read_mostly;
static struct hlist_head *inode_hashtable __read_mostly;
static DEFINE_RWLOCK(inode_hash_lock);

static struct ksmbd_file_table global_ft;
static atomic_long_t fd_limit;
static struct kmem_cache *filp_cache;

void ksmbd_set_fd_limit(unsigned long limit)
{
	limit = min(limit, get_max_files());
	atomic_long_set(&fd_limit, limit);
}

static bool fd_limit_depleted(void)
{
	long v = atomic_long_dec_return(&fd_limit);

	if (v >= 0)
		return false;
	/* Went negative: undo the decrement and report depletion. */
	atomic_long_inc(&fd_limit);
	return true;
}

static void fd_limit_close(void)
{
	atomic_long_inc(&fd_limit);
}

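/*
 * INODE hash.  inode_hash() mixes the superblock pointer and inode number in
 * the same way fs/inode.c does, and __ksmbd_inode_lookup() must be called
 * with inode_hash_lock held (read or write).
 */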
static unsigned long inode_hash(struct super_block *sb, unsigned long hashval)
{
	unsigned long tmp;

	tmp = (hashval * (unsigned long)sb) ^ (GOLDEN_RATIO_PRIME + hashval) /
		L1_CACHE_BYTES;
	tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> inode_hash_shift);
	return tmp & inode_hash_mask;
}

static struct ksmbd_inode *__ksmbd_inode_lookup(struct inode *inode)
{
	struct hlist_head *head = inode_hashtable +
		inode_hash(inode->i_sb, inode->i_ino);
	struct ksmbd_inode *ci = NULL, *ret_ci = NULL;

	hlist_for_each_entry(ci, head, m_hash) {
		if (ci->m_inode == inode) {
			if (atomic_inc_not_zero(&ci->m_count))
				ret_ci = ci;
			break;
		}
	}
	return ret_ci;
}

static struct ksmbd_inode *ksmbd_inode_lookup(struct ksmbd_file *fp)
{
	return __ksmbd_inode_lookup(file_inode(fp->filp));
}

static struct ksmbd_inode *ksmbd_inode_lookup_by_vfsinode(struct inode *inode)
{
	struct ksmbd_inode *ci;

	read_lock(&inode_hash_lock);
	ci = __ksmbd_inode_lookup(inode);
	read_unlock(&inode_hash_lock);
	return ci;
}

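/*
 * ksmbd_query_inode_status() reports whether any open handle has already
 * marked the inode for deletion.  An illustrative (hypothetical) caller in
 * an open or rename path might do:
 *
 *	if (ksmbd_query_inode_status(inode) ==
 *	    KSMBD_INODE_STATUS_PENDING_DELETE)
 *		return -EBUSY;
 */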
int ksmbd_query_inode_status(struct inode *inode)
{
	struct ksmbd_inode *ci;
	int ret = KSMBD_INODE_STATUS_UNKNOWN;

	read_lock(&inode_hash_lock);
	ci = __ksmbd_inode_lookup(inode);
	if (ci) {
		ret = KSMBD_INODE_STATUS_OK;
		if (ci->m_flags & S_DEL_PENDING)
			ret = KSMBD_INODE_STATUS_PENDING_DELETE;
		atomic_dec(&ci->m_count);
	}
	read_unlock(&inode_hash_lock);
	return ret;
}

bool ksmbd_inode_pending_delete(struct ksmbd_file *fp)
{
	return (fp->f_ci->m_flags & S_DEL_PENDING);
}

void ksmbd_set_inode_pending_delete(struct ksmbd_file *fp)
{
	fp->f_ci->m_flags |= S_DEL_PENDING;
}

void ksmbd_clear_inode_pending_delete(struct ksmbd_file *fp)
{
	fp->f_ci->m_flags &= ~S_DEL_PENDING;
}

void ksmbd_fd_set_delete_on_close(struct ksmbd_file *fp,
				  int file_info)
{
	if (ksmbd_stream_fd(fp)) {
		fp->f_ci->m_flags |= S_DEL_ON_CLS_STREAM;
		return;
	}

	fp->f_ci->m_flags |= S_DEL_ON_CLS;
}

static void ksmbd_inode_hash(struct ksmbd_inode *ci)
{
	struct hlist_head *b = inode_hashtable +
		inode_hash(ci->m_inode->i_sb, ci->m_inode->i_ino);

	hlist_add_head(&ci->m_hash, b);
}

static void ksmbd_inode_unhash(struct ksmbd_inode *ci)
{
	write_lock(&inode_hash_lock);
	hlist_del_init(&ci->m_hash);
	write_unlock(&inode_hash_lock);
}

static int ksmbd_inode_init(struct ksmbd_inode *ci, struct ksmbd_file *fp)
{
	ci->m_inode = file_inode(fp->filp);
	atomic_set(&ci->m_count, 1);
	atomic_set(&ci->op_count, 0);
	atomic_set(&ci->sop_count, 0);
	ci->m_flags = 0;
	ci->m_fattr = 0;
	INIT_LIST_HEAD(&ci->m_fp_list);
	INIT_LIST_HEAD(&ci->m_op_list);
	rwlock_init(&ci->m_lock);
	return 0;
}

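/*
 * ksmbd_inode_init() only fills in the structure; ksmbd_inode_get() below
 * inserts it into the hash table under inode_hash_lock, so two racing opens
 * of the same inode cannot both add a new entry.
 */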
static struct ksmbd_inode *ksmbd_inode_get(struct ksmbd_file *fp)
{
	struct ksmbd_inode *ci, *tmpci;
	int rc;

	read_lock(&inode_hash_lock);
	ci = ksmbd_inode_lookup(fp);
	read_unlock(&inode_hash_lock);
	if (ci)
		return ci;

	ci = kmalloc(sizeof(struct ksmbd_inode), GFP_KERNEL);
	if (!ci)
		return NULL;

	rc = ksmbd_inode_init(ci, fp);
	if (rc) {
		pr_err("inode initialization failed\n");
		kfree(ci);
		return NULL;
	}

	write_lock(&inode_hash_lock);
	tmpci = ksmbd_inode_lookup(fp);
	if (!tmpci) {
		ksmbd_inode_hash(ci);
	} else {
		/* Lost the race: reuse the entry another open just hashed. */
		kfree(ci);
		ci = tmpci;
	}
	write_unlock(&inode_hash_lock);
	return ci;
}

static void ksmbd_inode_free(struct ksmbd_inode *ci)
{
	ksmbd_inode_unhash(ci);
	kfree(ci);
}

static void ksmbd_inode_put(struct ksmbd_inode *ci)
{
	if (atomic_dec_and_test(&ci->m_count))
		ksmbd_inode_free(ci);
}

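/*
 * A ksmbd_inode is shared by every ksmbd_file opened against the same VFS
 * inode; it is unhashed and freed once m_count drops to zero, either here
 * or in __ksmbd_inode_close().
 */
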
int __init ksmbd_inode_hash_init(void)
{
	unsigned int loop;
	unsigned long numentries = 16384;
	unsigned long bucketsize = sizeof(struct hlist_head);
	unsigned long size;

	inode_hash_shift = ilog2(numentries);
	inode_hash_mask = (1 << inode_hash_shift) - 1;

	size = bucketsize << inode_hash_shift;

	/* init master fp hash table */
	inode_hashtable = vmalloc(size);
	if (!inode_hashtable)
		return -ENOMEM;

	for (loop = 0; loop < (1U << inode_hash_shift); loop++)
		INIT_HLIST_HEAD(&inode_hashtable[loop]);
	return 0;
}

void ksmbd_release_inode_hash(void)
{
	vfree(inode_hashtable);
}

static void __ksmbd_inode_close(struct ksmbd_file *fp)
{
	struct dentry *dir, *dentry;
	struct ksmbd_inode *ci = fp->f_ci;
	int err;
	struct file *filp;

	filp = fp->filp;
	if (ksmbd_stream_fd(fp) && (ci->m_flags & S_DEL_ON_CLS_STREAM)) {
		ci->m_flags &= ~S_DEL_ON_CLS_STREAM;
		err = ksmbd_vfs_remove_xattr(file_mnt_idmap(filp),
					     filp->f_path.dentry,
					     fp->stream.name);
		if (err)
			pr_err("remove xattr failed : %s\n",
			       fp->stream.name);
	}

	if (atomic_dec_and_test(&ci->m_count)) {
		write_lock(&ci->m_lock);
		if (ci->m_flags & (S_DEL_ON_CLS | S_DEL_PENDING)) {
			dentry = filp->f_path.dentry;
			dir = dentry->d_parent;
			ci->m_flags &= ~(S_DEL_ON_CLS | S_DEL_PENDING);
			write_unlock(&ci->m_lock);
			ksmbd_vfs_unlink(file_mnt_idmap(filp), dir, dentry);
			write_lock(&ci->m_lock);
		}
		write_unlock(&ci->m_lock);

		ksmbd_inode_free(ci);
	}
}

static void __ksmbd_remove_durable_fd(struct ksmbd_file *fp)
{
	if (!has_file_id(fp->persistent_id))
		return;

	write_lock(&global_ft.lock);
	idr_remove(global_ft.idr, fp->persistent_id);
	write_unlock(&global_ft.lock);
}

static void __ksmbd_remove_fd(struct ksmbd_file_table *ft, struct ksmbd_file *fp)
{
	if (!has_file_id(fp->volatile_id))
		return;

	write_lock(&fp->f_ci->m_lock);
	list_del_init(&fp->node);
	write_unlock(&fp->f_ci->m_lock);

	write_lock(&ft->lock);
	idr_remove(ft->idr, fp->volatile_id);
	write_unlock(&ft->lock);
}

static void __ksmbd_close_fd(struct ksmbd_file_table *ft, struct ksmbd_file *fp)
{
	struct file *filp;
	struct ksmbd_lock *smb_lock, *tmp_lock;

	fd_limit_close();
	__ksmbd_remove_durable_fd(fp);
	__ksmbd_remove_fd(ft, fp);

	close_id_del_oplock(fp);
	filp = fp->filp;

	__ksmbd_inode_close(fp);
	if (!IS_ERR_OR_NULL(filp))
		fput(filp);

	/* because the reference count of fp is 0, it is guaranteed that
	 * there are no accesses to fp->lock_list.
	 */
	list_for_each_entry_safe(smb_lock, tmp_lock, &fp->lock_list, flist) {
		spin_lock(&fp->conn->llist_lock);
		list_del(&smb_lock->clist);
		spin_unlock(&fp->conn->llist_lock);

		list_del(&smb_lock->flist);
		locks_free_lock(smb_lock->fl);
		kfree(smb_lock);
	}

	if (ksmbd_stream_fd(fp))
		kfree(fp->stream.name);
	kmem_cache_free(filp_cache, fp);
}

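/*
 * fp reference counting: every ksmbd_file starts with refcount 1 for the
 * open itself.  Lookups take an extra reference through ksmbd_fp_get(),
 * which only succeeds while the refcount is non-zero, so an fp that is
 * already being torn down cannot be revived.
 */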
static struct ksmbd_file *ksmbd_fp_get(struct ksmbd_file *fp)
{
	if (!atomic_inc_not_zero(&fp->refcount))
		return NULL;
	return fp;
}

static struct ksmbd_file *__ksmbd_lookup_fd(struct ksmbd_file_table *ft,
					    u64 id)
{
	struct ksmbd_file *fp;

	if (!has_file_id(id))
		return NULL;

	read_lock(&ft->lock);
	fp = idr_find(ft->idr, id);
	if (fp)
		fp = ksmbd_fp_get(fp);
	read_unlock(&ft->lock);
	return fp;
}

static void __put_fd_final(struct ksmbd_work *work, struct ksmbd_file *fp)
{
	__ksmbd_close_fd(&work->sess->file_table, fp);
	atomic_dec(&work->conn->stats.open_files_count);
}

static void set_close_state_blocked_works(struct ksmbd_file *fp)
{
	struct ksmbd_work *cancel_work;

	spin_lock(&fp->f_lock);
	list_for_each_entry(cancel_work, &fp->blocked_works,
			    fp_entry) {
		cancel_work->state = KSMBD_WORK_CLOSED;
		cancel_work->cancel_fn(cancel_work->cancel_argv);
	}
	spin_unlock(&fp->f_lock);
}

int ksmbd_close_fd(struct ksmbd_work *work, u64 id)
{
	struct ksmbd_file *fp;
	struct ksmbd_file_table *ft;

	if (!has_file_id(id))
		return 0;

	ft = &work->sess->file_table;
	read_lock(&ft->lock);
	fp = idr_find(ft->idr, id);
	if (fp) {
		set_close_state_blocked_works(fp);

		if (!atomic_dec_and_test(&fp->refcount))
			fp = NULL;
	}
	read_unlock(&ft->lock);

	if (!fp)
		return -EINVAL;

	__put_fd_final(work, fp);
	return 0;
}

void ksmbd_fd_put(struct ksmbd_work *work, struct ksmbd_file *fp)
{
	if (!fp)
		return;

	if (!atomic_dec_and_test(&fp->refcount))
		return;
	__put_fd_final(work, fp);
}

static bool __sanity_check(struct ksmbd_tree_connect *tcon, struct ksmbd_file *fp)
{
	if (!fp)
		return false;
	if (fp->tcon != tcon)
		return false;
	return true;
}

struct ksmbd_file *ksmbd_lookup_foreign_fd(struct ksmbd_work *work, u64 id)
{
	return __ksmbd_lookup_fd(&work->sess->file_table, id);
}

struct ksmbd_file *ksmbd_lookup_fd_fast(struct ksmbd_work *work, u64 id)
{
	struct ksmbd_file *fp = __ksmbd_lookup_fd(&work->sess->file_table, id);

	if (__sanity_check(work->tcon, fp))
		return fp;

	ksmbd_fd_put(work, fp);
	return NULL;
}

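/*
 * Typical per-request pattern for ksmbd_lookup_fd_fast() (illustrative
 * sketch, not a fixed contract):
 *
 *	fp = ksmbd_lookup_fd_fast(work, volatile_id);
 *	if (!fp)
 *		return -ENOENT;
 *	... operate on fp->filp ...
 *	ksmbd_fd_put(work, fp);
 *
 * Every successful lookup takes a reference that the caller must drop with
 * ksmbd_fd_put(); whichever put brings the refcount to zero closes the fd.
 */
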
struct ksmbd_file *ksmbd_lookup_fd_slow(struct ksmbd_work *work, u64 id,
					u64 pid)
{
	struct ksmbd_file *fp;

	if (!has_file_id(id)) {
		id = work->compound_fid;
		pid = work->compound_pfid;
	}

	fp = __ksmbd_lookup_fd(&work->sess->file_table, id);
	if (!__sanity_check(work->tcon, fp)) {
		ksmbd_fd_put(work, fp);
		return NULL;
	}
	if (fp->persistent_id != pid) {
		ksmbd_fd_put(work, fp);
		return NULL;
	}
	return fp;
}

struct ksmbd_file *ksmbd_lookup_durable_fd(unsigned long long id)
{
	return __ksmbd_lookup_fd(&global_ft, id);
}

struct ksmbd_file *ksmbd_lookup_fd_cguid(char *cguid)
{
	struct ksmbd_file *fp = NULL;
	unsigned int id;

	read_lock(&global_ft.lock);
	idr_for_each_entry(global_ft.idr, fp, id) {
		if (!memcmp(fp->create_guid,
			    cguid,
			    SMB2_CREATE_GUID_SIZE)) {
			fp = ksmbd_fp_get(fp);
			break;
		}
	}
	read_unlock(&global_ft.lock);

	return fp;
}

struct ksmbd_file *ksmbd_lookup_fd_inode(struct inode *inode)
{
	struct ksmbd_file *lfp;
	struct ksmbd_inode *ci;

	ci = ksmbd_inode_lookup_by_vfsinode(inode);
	if (!ci)
		return NULL;

	read_lock(&ci->m_lock);
	list_for_each_entry(lfp, &ci->m_fp_list, node) {
		if (inode == file_inode(lfp->filp)) {
			atomic_dec(&ci->m_count);
			lfp = ksmbd_fp_get(lfp);
			read_unlock(&ci->m_lock);
			return lfp;
		}
	}
	atomic_dec(&ci->m_count);
	read_unlock(&ci->m_lock);
	return NULL;
}

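/*
 * SMB2 file ids have a volatile part and a persistent part.  The volatile id
 * indexes the owning session's file table; the persistent id indexes the
 * server-wide global_ft and is what durable-handle reconnects look up.
 */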
#define OPEN_ID_TYPE_VOLATILE_ID	(0)
#define OPEN_ID_TYPE_PERSISTENT_ID	(1)

static void __open_id_set(struct ksmbd_file *fp, u64 id, int type)
{
	if (type == OPEN_ID_TYPE_VOLATILE_ID)
		fp->volatile_id = id;
	if (type == OPEN_ID_TYPE_PERSISTENT_ID)
		fp->persistent_id = id;
}

static int __open_id(struct ksmbd_file_table *ft, struct ksmbd_file *fp,
		     int type)
{
	u64 id = 0;
	int ret;

	if (type == OPEN_ID_TYPE_VOLATILE_ID && fd_limit_depleted()) {
		__open_id_set(fp, KSMBD_NO_FID, type);
		return -EMFILE;
	}

	idr_preload(GFP_KERNEL);
	write_lock(&ft->lock);
	ret = idr_alloc_cyclic(ft->idr, fp, 0, INT_MAX - 1, GFP_NOWAIT);
	if (ret >= 0) {
		id = ret;
		ret = 0;
	} else {
		id = KSMBD_NO_FID;
		fd_limit_close();
	}

	__open_id_set(fp, id, type);
	write_unlock(&ft->lock);
	idr_preload_end();
	return ret;
}

unsigned int ksmbd_open_durable_fd(struct ksmbd_file *fp)
{
	__open_id(&global_ft, fp, OPEN_ID_TYPE_PERSISTENT_ID);
	return fp->persistent_id;
}

struct ksmbd_file *ksmbd_open_fd(struct ksmbd_work *work, struct file *filp)
{
	struct ksmbd_file *fp;
	int ret;

	fp = kmem_cache_zalloc(filp_cache, GFP_KERNEL);
	if (!fp) {
		pr_err("Failed to allocate memory\n");
		return ERR_PTR(-ENOMEM);
	}

	INIT_LIST_HEAD(&fp->blocked_works);
	INIT_LIST_HEAD(&fp->node);
	INIT_LIST_HEAD(&fp->lock_list);
	spin_lock_init(&fp->f_lock);
	atomic_set(&fp->refcount, 1);

	fp->filp		= filp;
	fp->conn		= work->conn;
	fp->tcon		= work->tcon;
	fp->volatile_id		= KSMBD_NO_FID;
	fp->persistent_id	= KSMBD_NO_FID;
	fp->f_ci		= ksmbd_inode_get(fp);

	if (!fp->f_ci) {
		ret = -ENOMEM;
		goto err_out;
	}

	ret = __open_id(&work->sess->file_table, fp, OPEN_ID_TYPE_VOLATILE_ID);
	if (ret) {
		ksmbd_inode_put(fp->f_ci);
		goto err_out;
	}

	atomic_inc(&work->conn->stats.open_files_count);
	return fp;

err_out:
	kmem_cache_free(filp_cache, fp);
	return ERR_PTR(ret);
}

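/*
 * On success ksmbd_open_fd() hands back an fp that already holds one
 * refcount reference, a volatile id in the session's file table, and a
 * reference on its shared ksmbd_inode, with conn->stats.open_files_count
 * bumped.  Callers are expected to release all of this through the normal
 * close path rather than by freeing fp directly.
 */
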
static int
__close_file_table_ids(struct ksmbd_file_table *ft,
		       struct ksmbd_tree_connect *tcon,
		       bool (*skip)(struct ksmbd_tree_connect *tcon,
				    struct ksmbd_file *fp))
{
	unsigned int id;
	struct ksmbd_file *fp;
	int num = 0;

	idr_for_each_entry(ft->idr, fp, id) {
		if (skip(tcon, fp))
			continue;

		set_close_state_blocked_works(fp);

		if (!atomic_dec_and_test(&fp->refcount))
			continue;
		__ksmbd_close_fd(ft, fp);
		num++;
	}
	return num;
}

static bool tree_conn_fd_check(struct ksmbd_tree_connect *tcon,
			       struct ksmbd_file *fp)
{
	/* Skip fds that belong to a different tree connect. */
	return fp->tcon != tcon;
}

static bool session_fd_check(struct ksmbd_tree_connect *tcon,
			     struct ksmbd_file *fp)
{
	/* Never skip: a session close tears down every fd in the table. */
	return false;
}

void ksmbd_close_tree_conn_fds(struct ksmbd_work *work)
{
	int num = __close_file_table_ids(&work->sess->file_table,
					 work->tcon,
					 tree_conn_fd_check);

	atomic_sub(num, &work->conn->stats.open_files_count);
}

void ksmbd_close_session_fds(struct ksmbd_work *work)
{
	int num = __close_file_table_ids(&work->sess->file_table,
					 work->tcon,
					 session_fd_check);

	atomic_sub(num, &work->conn->stats.open_files_count);
}

int ksmbd_init_global_file_table(void)
{
	return ksmbd_init_file_table(&global_ft);
}

void ksmbd_free_global_file_table(void)
{
	struct ksmbd_file *fp = NULL;
	unsigned int id;

	idr_for_each_entry(global_ft.idr, fp, id) {
		__ksmbd_remove_durable_fd(fp);
		kmem_cache_free(filp_cache, fp);
	}

	ksmbd_destroy_file_table(&global_ft);
}

int ksmbd_init_file_table(struct ksmbd_file_table *ft)
{
	ft->idr = kzalloc(sizeof(struct idr), GFP_KERNEL);
	if (!ft->idr)
		return -ENOMEM;

	idr_init(ft->idr);
	rwlock_init(&ft->lock);
	return 0;
}

void ksmbd_destroy_file_table(struct ksmbd_file_table *ft)
{
	if (!ft->idr)
		return;

	__close_file_table_ids(ft, NULL, session_fd_check);
	idr_destroy(ft->idr);
	kfree(ft->idr);
	ft->idr = NULL;
}

int ksmbd_init_file_cache(void)
{
	filp_cache = kmem_cache_create("ksmbd_file_cache",
				       sizeof(struct ksmbd_file), 0,
				       SLAB_HWCACHE_ALIGN, NULL);
	if (!filp_cache)
		goto out;

	return 0;

out:
	pr_err("failed to allocate file cache\n");
	return -ENOMEM;
}

void ksmbd_exit_file_cache(void)
{
	kmem_cache_destroy(filp_cache);
}