// SPDX-License-Identifier: GPL-2.0-only
/*
 * fs/dcache.c
 *
 * Complete reimplementation
 * (C) 1997 Thomas Schoebel-Theuer,
 * with heavy changes by Linus Torvalds
 */

/*
 * Notes on the allocation strategy:
 *
 * The dcache is a master of the icache - whenever a dcache entry
 * exists, the inode will always exist. "iput()" is done either when
 * the dcache entry is deleted or garbage collected.
 */

#include <linux/ratelimit.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/fscrypt.h>
#include <linux/fsnotify.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/cache.h>
#include <linux/export.h>
#include <linux/security.h>
#include <linux/seqlock.h>
#include <linux/memblock.h>
#include <linux/bit_spinlock.h>
#include <linux/rculist_bl.h>
#include <linux/list_lru.h>
#include "internal.h"
#include "mount.h"

/*
 * Usage:
 * dcache->d_inode->i_lock protects:
 *   - i_dentry, d_u.d_alias, d_inode of aliases
 * dcache_hash_bucket lock protects:
 *   - the dcache hash table
 * s_roots bl list spinlock protects:
 *   - the s_roots list (see __d_drop)
 * dentry->d_sb->s_dentry_lru_lock protects:
 *   - the dcache lru lists and counters
 * d_lock protects:
 *   - d_flags
 *   - d_name
 *   - d_lru
 *   - d_count
 *   - d_unhashed()
 *   - d_parent and d_subdirs
 *   - children's d_child and d_parent
 *   - d_u.d_alias, d_inode
 *
 * Ordering:
 * dentry->d_inode->i_lock
 *   dentry->d_lock
 *     dentry->d_sb->s_dentry_lru_lock
 *     dcache_hash_bucket lock
 *     s_roots lock
 *
 * If there is an ancestor relationship:
 * dentry->d_parent->...->d_parent->d_lock
 *   ...
 *     dentry->d_parent->d_lock
 *       dentry->d_lock
 *
 * If no ancestor relationship:
 * arbitrary, since it's serialized on rename_lock
 */
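
/*
 * For illustration only (a sketch, not used below): taking both locks on
 * a positive dentry must follow the ordering above - the inode lock
 * first, then the dentry lock.  A hypothetical helper would be:
 *
 *	static inline void example_lock_positive(struct dentry *dentry)
 *	{
 *		spin_lock(&dentry->d_inode->i_lock);
 *		spin_lock(&dentry->d_lock);
 *	}
 *
 * The real code below often cannot lock in this order (it typically
 * already holds d_lock), so it uses spin_trylock() plus a slow path that
 * drops and retakes the locks; see dentry_kill().
 */
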
int sysctl_vfs_cache_pressure __read_mostly = 100;
EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);

__cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);

EXPORT_SYMBOL(rename_lock);

static struct kmem_cache *dentry_cache __read_mostly;

const struct qstr empty_name = QSTR_INIT("", 0);
EXPORT_SYMBOL(empty_name);
const struct qstr slash_name = QSTR_INIT("/", 1);
EXPORT_SYMBOL(slash_name);

/*
 * This is the single most critical data structure when it comes
 * to the dcache: the hashtable for lookups. Somebody should try
 * to make this good - I've just made it work.
 *
 * This hash-function tries to avoid losing too many bits of hash
 * information, yet avoid using a prime hash-size or similar.
 */

static unsigned int d_hash_shift __read_mostly;

static struct hlist_bl_head *dentry_hashtable __read_mostly;

static inline struct hlist_bl_head *d_hash(unsigned int hash)
{
	return dentry_hashtable + (hash >> d_hash_shift);
}
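
/*
 * A sketch of how a lookup finds its bucket (the helper name is
 * hypothetical; a real caller hashes the name the same way the path
 * walking code does, salting with the parent dentry):
 *
 *	static inline struct hlist_bl_head *example_bucket(
 *			const struct dentry *parent,
 *			const char *name, unsigned int len)
 *	{
 *		return d_hash(full_name_hash(parent, name, len));
 *	}
 *
 * d_hash_shift is set up during initialization so that the *top* bits
 * of the 32-bit hash select the bucket.
 */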

#define IN_LOOKUP_SHIFT 10
static struct hlist_bl_head in_lookup_hashtable[1 << IN_LOOKUP_SHIFT];

static inline struct hlist_bl_head *in_lookup_hash(const struct dentry *parent,
					unsigned int hash)
{
	hash += (unsigned long) parent / L1_CACHE_BYTES;
	return in_lookup_hashtable + hash_32(hash, IN_LOOKUP_SHIFT);
}


/* Statistics gathering. */
struct dentry_stat_t dentry_stat = {
	.age_limit = 45,
};

static DEFINE_PER_CPU(long, nr_dentry);
static DEFINE_PER_CPU(long, nr_dentry_unused);
static DEFINE_PER_CPU(long, nr_dentry_negative);

#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)

/*
 * Here we resort to our own counters instead of using generic per-cpu counters
 * for consistency with what the vfs inode code does. We expect to get better
 * code and performance by having our own specialized counters.
 *
 * Please note that the loop is done over all possible CPUs, not over all online
 * CPUs. The reason for this is that we don't want to play games with CPUs going
 * on and off. If one of them goes off, we will just keep its counters.
 *
 * glommer: See cffbc8a for details, and if you ever intend to change this,
 * please update all vfs counters to match.
 */
static long get_nr_dentry(void)
{
	int i;
	long sum = 0;
	for_each_possible_cpu(i)
		sum += per_cpu(nr_dentry, i);
	return sum < 0 ? 0 : sum;
}

static long get_nr_dentry_unused(void)
{
	int i;
	long sum = 0;
	for_each_possible_cpu(i)
		sum += per_cpu(nr_dentry_unused, i);
	return sum < 0 ? 0 : sum;
}

static long get_nr_dentry_negative(void)
{
	int i;
	long sum = 0;

	for_each_possible_cpu(i)
		sum += per_cpu(nr_dentry_negative, i);
	return sum < 0 ? 0 : sum;
}

int proc_nr_dentry(struct ctl_table *table, int write, void *buffer,
		   size_t *lenp, loff_t *ppos)
{
	dentry_stat.nr_dentry = get_nr_dentry();
	dentry_stat.nr_unused = get_nr_dentry_unused();
	dentry_stat.nr_negative = get_nr_dentry_negative();
	return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
#endif

/*
 * Compare 2 name strings, return 0 if they match, otherwise non-zero.
 * The strings are both count bytes long, and count is non-zero.
 */
#ifdef CONFIG_DCACHE_WORD_ACCESS

#include <asm/word-at-a-time.h>
/*
 * NOTE! 'cs' comes from a dentry, so it has an
 * aligned allocation for this particular component. We don't
 * strictly need the load_unaligned_zeropad() safety, but it
 * doesn't hurt either.
 *
 * In contrast, 'ct' and 'tcount' can be from a pathname, and do
 * need the careful unaligned handling.
 */
static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount)
{
	unsigned long a,b,mask;

	for (;;) {
		a = read_word_at_a_time(cs);
		b = load_unaligned_zeropad(ct);
		if (tcount < sizeof(unsigned long))
			break;
		if (unlikely(a != b))
			return 1;
		cs += sizeof(unsigned long);
		ct += sizeof(unsigned long);
		tcount -= sizeof(unsigned long);
		if (!tcount)
			return 0;
	}
	mask = bytemask_from_count(tcount);
	return unlikely(!!((a ^ b) & mask));
}

#else

static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount)
{
	do {
		if (*cs != *ct)
			return 1;
		cs++;
		ct++;
		tcount--;
	} while (tcount);
	return 0;
}

#endif
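
/*
 * Worked example (a sketch for a 64-bit little-endian machine): comparing
 * the 3-byte component "usr" loads one word from each side, then
 * bytemask_from_count(3) yields 0x0000000000ffffff, so only the three
 * name bytes take part in the final (a ^ b) & mask test; whatever the
 * loads picked up beyond the name is masked off.
 */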

static inline int dentry_cmp(const struct dentry *dentry, const unsigned char *ct, unsigned tcount)
{
	/*
	 * Be careful about RCU walk racing with rename:
	 * use 'READ_ONCE' to fetch the name pointer.
	 *
	 * NOTE! Even if a rename will mean that the length
	 * was not loaded atomically, we don't care. The
	 * RCU walk will check the sequence count eventually,
	 * and catch it. And we won't overrun the buffer,
	 * because we're reading the name pointer atomically,
	 * and a dentry name is guaranteed to be properly
	 * terminated with a NUL byte.
	 *
	 * End result: even if 'len' is wrong, we'll exit
	 * early because the data cannot match (there can
	 * be no NUL in the ct/tcount data)
	 */
	const unsigned char *cs = READ_ONCE(dentry->d_name.name);

	return dentry_string_cmp(cs, ct, tcount);
}

struct external_name {
	union {
		atomic_t count;
		struct rcu_head head;
	} u;
	unsigned char name[];
};

static inline struct external_name *external_name(struct dentry *dentry)
{
	return container_of(dentry->d_name.name, struct external_name, name[0]);
}

static void __d_free(struct rcu_head *head)
{
	struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);

	kmem_cache_free(dentry_cache, dentry);
}

static void __d_free_external(struct rcu_head *head)
{
	struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);
	kfree(external_name(dentry));
	kmem_cache_free(dentry_cache, dentry);
}

static inline int dname_external(const struct dentry *dentry)
{
	return dentry->d_name.name != dentry->d_iname;
}

void take_dentry_name_snapshot(struct name_snapshot *name, struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	name->name = dentry->d_name;
	if (unlikely(dname_external(dentry))) {
		atomic_inc(&external_name(dentry)->u.count);
	} else {
		memcpy(name->inline_name, dentry->d_iname,
		       dentry->d_name.len + 1);
		name->name.name = name->inline_name;
	}
	spin_unlock(&dentry->d_lock);
}
EXPORT_SYMBOL(take_dentry_name_snapshot);

void release_dentry_name_snapshot(struct name_snapshot *name)
{
	if (unlikely(name->name.name != name->inline_name)) {
		struct external_name *p;
		p = container_of(name->name.name, struct external_name, name[0]);
		if (unlikely(atomic_dec_and_test(&p->u.count)))
			kfree_rcu(p, u.head);
	}
}
EXPORT_SYMBOL(release_dentry_name_snapshot);
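
/*
 * Sketch of typical snapshot use (hypothetical caller; fsnotify does
 * something similar when it must report a name that may be renamed
 * underneath it):
 *
 *	struct name_snapshot snap;
 *
 *	take_dentry_name_snapshot(&snap, dentry);
 *	pr_debug("working on %s\n", snap.name.name);
 *	release_dentry_name_snapshot(&snap);
 *
 * The snapshot either pins the external name with a refcount or copies
 * the inline name, so snap.name stays valid without holding d_lock.
 */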

static inline void __d_set_inode_and_type(struct dentry *dentry,
					  struct inode *inode,
					  unsigned type_flags)
{
	unsigned flags;

	dentry->d_inode = inode;
	flags = READ_ONCE(dentry->d_flags);
	flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU);
	flags |= type_flags;
	smp_store_release(&dentry->d_flags, flags);
}

static inline void __d_clear_type_and_inode(struct dentry *dentry)
{
	unsigned flags = READ_ONCE(dentry->d_flags);

	flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU);
	WRITE_ONCE(dentry->d_flags, flags);
	dentry->d_inode = NULL;
	if (dentry->d_flags & DCACHE_LRU_LIST)
		this_cpu_inc(nr_dentry_negative);
}

static void dentry_free(struct dentry *dentry)
{
	WARN_ON(!hlist_unhashed(&dentry->d_u.d_alias));
	if (unlikely(dname_external(dentry))) {
		struct external_name *p = external_name(dentry);
		if (likely(atomic_dec_and_test(&p->u.count))) {
			call_rcu(&dentry->d_u.d_rcu, __d_free_external);
			return;
		}
	}
	/* if dentry was never visible to RCU, immediate free is OK */
	if (dentry->d_flags & DCACHE_NORCU)
		__d_free(&dentry->d_u.d_rcu);
	else
		call_rcu(&dentry->d_u.d_rcu, __d_free);
}

/*
 * Release the dentry's inode, using the filesystem
 * d_iput() operation if defined.
 */
static void dentry_unlink_inode(struct dentry * dentry)
	__releases(dentry->d_lock)
	__releases(dentry->d_inode->i_lock)
{
	struct inode *inode = dentry->d_inode;

	raw_write_seqcount_begin(&dentry->d_seq);
	__d_clear_type_and_inode(dentry);
	hlist_del_init(&dentry->d_u.d_alias);
	raw_write_seqcount_end(&dentry->d_seq);
	spin_unlock(&dentry->d_lock);
	spin_unlock(&inode->i_lock);
	if (!inode->i_nlink)
		fsnotify_inoderemove(inode);
	if (dentry->d_op && dentry->d_op->d_iput)
		dentry->d_op->d_iput(dentry, inode);
	else
		iput(inode);
}

/*
 * The DCACHE_LRU_LIST bit is set whenever the 'd_lru' entry
 * is in use - which includes both the "real" per-superblock
 * LRU list _and_ the DCACHE_SHRINK_LIST use.
 *
 * The DCACHE_SHRINK_LIST bit is set whenever the dentry is
 * on the shrink list (ie not on the superblock LRU list).
 *
 * The per-cpu "nr_dentry_unused" counters are updated with
 * the DCACHE_LRU_LIST bit.
 *
 * The per-cpu "nr_dentry_negative" counters are only updated
 * when deleted from or added to the per-superblock LRU list, not
 * from/to the shrink list. That is to avoid an unneeded dec/inc
 * pair when moving from LRU to shrink list in select_collect().
 *
 * These helper functions make sure we always follow the
 * rules. d_lock must be held by the caller.
 */
#define D_FLAG_VERIFY(dentry,x) WARN_ON_ONCE(((dentry)->d_flags & (DCACHE_LRU_LIST | DCACHE_SHRINK_LIST)) != (x))
static void d_lru_add(struct dentry *dentry)
{
	D_FLAG_VERIFY(dentry, 0);
	dentry->d_flags |= DCACHE_LRU_LIST;
	this_cpu_inc(nr_dentry_unused);
	if (d_is_negative(dentry))
		this_cpu_inc(nr_dentry_negative);
	WARN_ON_ONCE(!list_lru_add(&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
}

static void d_lru_del(struct dentry *dentry)
{
	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
	dentry->d_flags &= ~DCACHE_LRU_LIST;
	this_cpu_dec(nr_dentry_unused);
	if (d_is_negative(dentry))
		this_cpu_dec(nr_dentry_negative);
	WARN_ON_ONCE(!list_lru_del(&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
}

static void d_shrink_del(struct dentry *dentry)
{
	D_FLAG_VERIFY(dentry, DCACHE_SHRINK_LIST | DCACHE_LRU_LIST);
	list_del_init(&dentry->d_lru);
	dentry->d_flags &= ~(DCACHE_SHRINK_LIST | DCACHE_LRU_LIST);
	this_cpu_dec(nr_dentry_unused);
}

static void d_shrink_add(struct dentry *dentry, struct list_head *list)
{
	D_FLAG_VERIFY(dentry, 0);
	list_add(&dentry->d_lru, list);
	dentry->d_flags |= DCACHE_SHRINK_LIST | DCACHE_LRU_LIST;
	this_cpu_inc(nr_dentry_unused);
}

/*
 * These can only be called under the global LRU lock, ie during the
 * callback for freeing the LRU list. "isolate" removes it from the
 * LRU lists entirely, while shrink_move moves it to the indicated
 * private list.
 */
static void d_lru_isolate(struct list_lru_one *lru, struct dentry *dentry)
{
	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
	dentry->d_flags &= ~DCACHE_LRU_LIST;
	this_cpu_dec(nr_dentry_unused);
	if (d_is_negative(dentry))
		this_cpu_dec(nr_dentry_negative);
	list_lru_isolate(lru, &dentry->d_lru);
}

static void d_lru_shrink_move(struct list_lru_one *lru, struct dentry *dentry,
			      struct list_head *list)
{
	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
	dentry->d_flags |= DCACHE_SHRINK_LIST;
	if (d_is_negative(dentry))
		this_cpu_dec(nr_dentry_negative);
	list_lru_isolate_move(lru, &dentry->d_lru, list);
}
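
/*
 * Summary of the legal (DCACHE_LRU_LIST, DCACHE_SHRINK_LIST) states and
 * the helpers that move between them (an illustration of the rules
 * above, not new policy):
 *
 *	neither set         --d_lru_add()-->         LRU_LIST only
 *	LRU_LIST only       --d_lru_del()-->         neither set
 *	neither set         --d_shrink_add()-->      LRU_LIST|SHRINK_LIST
 *	LRU_LIST|SHRINK     --d_shrink_del()-->      neither set
 *	LRU_LIST only       --d_lru_shrink_move()--> LRU_LIST|SHRINK_LIST
 *	LRU_LIST only       --d_lru_isolate()-->     neither set
 */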

/**
 * d_drop - drop a dentry
 * @dentry: dentry to drop
 *
 * d_drop() unhashes the entry from the parent dentry hashes, so that it won't
 * be found through a VFS lookup any more. Note that this is different from
 * deleting the dentry - d_delete will try to mark the dentry negative if
 * possible, giving a successful _negative_ lookup, while d_drop will
 * just make the cache lookup fail.
 *
 * d_drop() is used mainly for stuff that wants to invalidate a dentry for some
 * reason (NFS timeouts or autofs deletes).
 *
 * __d_drop requires dentry->d_lock
 * ___d_drop doesn't mark dentry as "unhashed"
 *   (dentry->d_hash.pprev will be LIST_POISON2, not NULL).
 */
static void ___d_drop(struct dentry *dentry)
{
	struct hlist_bl_head *b;
	/*
	 * Hashed dentries are normally on the dentry hashtable,
	 * with the exception of those newly allocated by
	 * d_obtain_root, which are always IS_ROOT:
	 */
	if (unlikely(IS_ROOT(dentry)))
		b = &dentry->d_sb->s_roots;
	else
		b = d_hash(dentry->d_name.hash);

	hlist_bl_lock(b);
	__hlist_bl_del(&dentry->d_hash);
	hlist_bl_unlock(b);
}

void __d_drop(struct dentry *dentry)
{
	if (!d_unhashed(dentry)) {
		___d_drop(dentry);
		dentry->d_hash.pprev = NULL;
		write_seqcount_invalidate(&dentry->d_seq);
	}
}
EXPORT_SYMBOL(__d_drop);

void d_drop(struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	__d_drop(dentry);
	spin_unlock(&dentry->d_lock);
}
EXPORT_SYMBOL(d_drop);

static inline void dentry_unlist(struct dentry *dentry, struct dentry *parent)
{
	struct dentry *next;
	/*
	 * Inform d_walk() and shrink_dentry_list() that we are no longer
	 * attached to the dentry tree
	 */
	dentry->d_flags |= DCACHE_DENTRY_KILLED;
	if (unlikely(list_empty(&dentry->d_child)))
		return;
	__list_del_entry(&dentry->d_child);
	/*
	 * Cursors can move around the list of children.  While we'd been
	 * a normal list member, it didn't matter - ->d_child.next would've
	 * been updated.  However, from now on it won't be and for the
	 * things like d_walk() it might end up with a nasty surprise.
	 * Normally d_walk() doesn't care about cursors moving around -
	 * ->d_lock on parent prevents that and since a cursor has no children
	 * of its own, we get through it without ever unlocking the parent.
	 * There is one exception, though - if we ascend from a child that
	 * gets killed as soon as we unlock it, the next sibling is found
	 * using the value left in its ->d_child.next.  And if _that_
	 * pointed to a cursor, and cursor got moved (e.g. by lseek())
	 * before d_walk() regains parent->d_lock, we'll end up skipping
	 * everything the cursor had been moved past.
	 *
	 * Solution: make sure that the pointer left behind in ->d_child.next
	 * points to something that won't be moving around.  I.e. skip the
	 * cursors.
	 */
	while (dentry->d_child.next != &parent->d_subdirs) {
		next = list_entry(dentry->d_child.next, struct dentry, d_child);
		if (likely(!(next->d_flags & DCACHE_DENTRY_CURSOR)))
			break;
		dentry->d_child.next = next->d_child.next;
	}
}

static void __dentry_kill(struct dentry *dentry)
{
	struct dentry *parent = NULL;
	bool can_free = true;
	if (!IS_ROOT(dentry))
		parent = dentry->d_parent;

	/*
	 * The dentry is now unrecoverably dead to the world.
	 */
	lockref_mark_dead(&dentry->d_lockref);

	/*
	 * inform the fs via d_prune that this dentry is about to be
	 * unhashed and destroyed.
	 */
	if (dentry->d_flags & DCACHE_OP_PRUNE)
		dentry->d_op->d_prune(dentry);

	if (dentry->d_flags & DCACHE_LRU_LIST) {
		if (!(dentry->d_flags & DCACHE_SHRINK_LIST))
			d_lru_del(dentry);
	}
	/* if it was on the hash then remove it */
	__d_drop(dentry);
	dentry_unlist(dentry, parent);
	if (parent)
		spin_unlock(&parent->d_lock);
	if (dentry->d_inode)
		dentry_unlink_inode(dentry);
	else
		spin_unlock(&dentry->d_lock);
	this_cpu_dec(nr_dentry);
	if (dentry->d_op && dentry->d_op->d_release)
		dentry->d_op->d_release(dentry);

	spin_lock(&dentry->d_lock);
	if (dentry->d_flags & DCACHE_SHRINK_LIST) {
		dentry->d_flags |= DCACHE_MAY_FREE;
		can_free = false;
	}
	spin_unlock(&dentry->d_lock);
	if (likely(can_free))
		dentry_free(dentry);
	cond_resched();
}

static struct dentry *__lock_parent(struct dentry *dentry)
{
	struct dentry *parent;
	rcu_read_lock();
	spin_unlock(&dentry->d_lock);
again:
	parent = READ_ONCE(dentry->d_parent);
	spin_lock(&parent->d_lock);
	/*
	 * We can't blindly lock dentry until we are sure
	 * that we won't violate the locking order.
	 * Any changes of dentry->d_parent must have
	 * been done with parent->d_lock held, so
	 * spin_lock() above is enough of a barrier
	 * for checking if it's still our child.
	 */
	if (unlikely(parent != dentry->d_parent)) {
		spin_unlock(&parent->d_lock);
		goto again;
	}
	rcu_read_unlock();
	if (parent != dentry)
		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
	else
		parent = NULL;
	return parent;
}

static inline struct dentry *lock_parent(struct dentry *dentry)
{
	struct dentry *parent = dentry->d_parent;
	if (IS_ROOT(dentry))
		return NULL;
	if (likely(spin_trylock(&parent->d_lock)))
		return parent;
	return __lock_parent(dentry);
}

static inline bool retain_dentry(struct dentry *dentry)
{
	WARN_ON(d_in_lookup(dentry));

	/* Unreachable? Get rid of it */
	if (unlikely(d_unhashed(dentry)))
		return false;

	if (unlikely(dentry->d_flags & DCACHE_DISCONNECTED))
		return false;

	if (unlikely(dentry->d_flags & DCACHE_OP_DELETE)) {
		if (dentry->d_op->d_delete(dentry))
			return false;
	}
	/* retain; LRU fodder */
	dentry->d_lockref.count--;
	if (unlikely(!(dentry->d_flags & DCACHE_LRU_LIST)))
		d_lru_add(dentry);
	else if (unlikely(!(dentry->d_flags & DCACHE_REFERENCED)))
		dentry->d_flags |= DCACHE_REFERENCED;
	return true;
}

/*
 * Finish off a dentry we've decided to kill.
 * dentry->d_lock must be held, returns with it unlocked.
 * Returns dentry requiring refcount drop, or NULL if we're done.
 */
static struct dentry *dentry_kill(struct dentry *dentry)
	__releases(dentry->d_lock)
{
	struct inode *inode = dentry->d_inode;
	struct dentry *parent = NULL;

	if (inode && unlikely(!spin_trylock(&inode->i_lock)))
		goto slow_positive;

	if (!IS_ROOT(dentry)) {
		parent = dentry->d_parent;
		if (unlikely(!spin_trylock(&parent->d_lock))) {
			parent = __lock_parent(dentry);
			if (likely(inode || !dentry->d_inode))
				goto got_locks;
			/* negative that became positive */
			if (parent)
				spin_unlock(&parent->d_lock);
			inode = dentry->d_inode;
			goto slow_positive;
		}
	}
	__dentry_kill(dentry);
	return parent;

slow_positive:
	spin_unlock(&dentry->d_lock);
	spin_lock(&inode->i_lock);
	spin_lock(&dentry->d_lock);
	parent = lock_parent(dentry);
got_locks:
	if (unlikely(dentry->d_lockref.count != 1)) {
		dentry->d_lockref.count--;
	} else if (likely(!retain_dentry(dentry))) {
		__dentry_kill(dentry);
		return parent;
	}
	/* we are keeping it, after all */
	if (inode)
		spin_unlock(&inode->i_lock);
	if (parent)
		spin_unlock(&parent->d_lock);
	spin_unlock(&dentry->d_lock);
	return NULL;
}

/*
 * Try to do a lockless dput(), and return whether that was successful.
 *
 * If unsuccessful, we return false, having already taken the dentry lock.
 *
 * The caller needs to hold the RCU read lock, so that the dentry is
 * guaranteed to stay around even if the refcount goes down to zero!
 */
static inline bool fast_dput(struct dentry *dentry)
{
	int ret;
	unsigned int d_flags;

	/*
	 * If we have a d_op->d_delete() operation, we should not
	 * let the dentry count go to zero, so use "put_or_lock".
	 */
	if (unlikely(dentry->d_flags & DCACHE_OP_DELETE))
		return lockref_put_or_lock(&dentry->d_lockref);

	/*
	 * .. otherwise, we can try to just decrement the
	 * lockref optimistically.
	 */
	ret = lockref_put_return(&dentry->d_lockref);

	/*
	 * If the lockref_put_return() failed due to the lock being held
	 * by somebody else, the fast path has failed. We will need to
	 * get the lock, and then check the count again.
	 */
	if (unlikely(ret < 0)) {
		spin_lock(&dentry->d_lock);
		if (dentry->d_lockref.count > 1) {
			dentry->d_lockref.count--;
			spin_unlock(&dentry->d_lock);
			return true;
		}
		return false;
	}

	/*
	 * If we weren't the last ref, we're done.
	 */
	if (ret)
		return true;

	/*
	 * Careful, careful. The reference count went down
	 * to zero, but we don't hold the dentry lock, so
	 * somebody else could get it again, and do another
	 * dput(), and we need to not race with that.
	 *
	 * However, there is a very special and common case
	 * where we don't care, because there is nothing to
	 * do: the dentry is still hashed, it does not have
	 * a 'delete' op, and it's referenced and already on
	 * the LRU list.
	 *
	 * NOTE! Since we aren't locked, these values are
	 * not "stable". However, it is sufficient that at
	 * some point after we dropped the reference the
	 * dentry was hashed and the flags had the proper
	 * value. Other dentry users may have re-gotten
	 * a reference to the dentry and change that, but
	 * our work is done - we can leave the dentry
	 * around with a zero refcount.
	 */
	smp_rmb();
	d_flags = READ_ONCE(dentry->d_flags);
	d_flags &= DCACHE_REFERENCED | DCACHE_LRU_LIST | DCACHE_DISCONNECTED;

	/* Nothing to do? Dropping the reference was all we needed? */
	if (d_flags == (DCACHE_REFERENCED | DCACHE_LRU_LIST) && !d_unhashed(dentry))
		return true;

	/*
	 * Not the fast normal case? Get the lock. We've already decremented
	 * the refcount, but we'll need to re-check the situation after
	 * getting the lock.
	 */
	spin_lock(&dentry->d_lock);

	/*
	 * Did somebody else grab a reference to it in the meantime, and
	 * we're no longer the last user after all? Alternatively, somebody
	 * else could have killed it and marked it dead. Either way, we
	 * don't need to do anything else.
	 */
	if (dentry->d_lockref.count) {
		spin_unlock(&dentry->d_lock);
		return true;
	}

	/*
	 * Re-get the reference we optimistically dropped. We hold the
	 * lock, and we just tested that it was zero, so we can just
	 * set it to 1.
	 */
	dentry->d_lockref.count = 1;
	return false;
}


/*
 * This is dput
 *
 * This is complicated by the fact that we do not want to put
 * dentries that are no longer on any hash chain on the unused
 * list: we'd much rather just get rid of them immediately.
 *
 * However, that implies that we have to traverse the dentry
 * tree upwards to the parents which might _also_ now be
 * scheduled for deletion (it may have been only waiting for
 * its last child to go away).
 *
 * This tail recursion is done by hand as we don't want to depend
 * on the compiler to always get this right (gcc generally doesn't).
 * Real recursion would eat up our stack space.
 */

/*
 * dput - release a dentry
 * @dentry: dentry to release
 *
 * Release a dentry. This will drop the usage count and if appropriate
 * call the dentry unlink method as well as removing it from the queues and
 * releasing its resources. If the parent dentries were scheduled for release
 * they too may now get deleted.
 */
void dput(struct dentry *dentry)
{
	while (dentry) {
		might_sleep();

		rcu_read_lock();
		if (likely(fast_dput(dentry))) {
			rcu_read_unlock();
			return;
		}

		/* Slow case: now with the dentry lock held */
		rcu_read_unlock();

		if (likely(retain_dentry(dentry))) {
			spin_unlock(&dentry->d_lock);
			return;
		}

		dentry = dentry_kill(dentry);
	}
}
EXPORT_SYMBOL(dput);

static void __dput_to_list(struct dentry *dentry, struct list_head *list)
__must_hold(&dentry->d_lock)
{
	if (dentry->d_flags & DCACHE_SHRINK_LIST) {
		/* let the owner of the list it's on deal with it */
		--dentry->d_lockref.count;
	} else {
		if (dentry->d_flags & DCACHE_LRU_LIST)
			d_lru_del(dentry);
		if (!--dentry->d_lockref.count)
			d_shrink_add(dentry, list);
	}
}

void dput_to_list(struct dentry *dentry, struct list_head *list)
{
	rcu_read_lock();
	if (likely(fast_dput(dentry))) {
		rcu_read_unlock();
		return;
	}
	rcu_read_unlock();
	if (!retain_dentry(dentry))
		__dput_to_list(dentry, list);
	spin_unlock(&dentry->d_lock);
}

/* This must be called with d_lock held */
static inline void __dget_dlock(struct dentry *dentry)
{
	dentry->d_lockref.count++;
}

static inline void __dget(struct dentry *dentry)
{
	lockref_get(&dentry->d_lockref);
}

struct dentry *dget_parent(struct dentry *dentry)
{
	int gotref;
	struct dentry *ret;
	unsigned seq;

	/*
	 * Do optimistic parent lookup without any
	 * locking.
	 */
	rcu_read_lock();
	seq = raw_seqcount_begin(&dentry->d_seq);
	ret = READ_ONCE(dentry->d_parent);
	gotref = lockref_get_not_zero(&ret->d_lockref);
	rcu_read_unlock();
	if (likely(gotref)) {
		if (!read_seqcount_retry(&dentry->d_seq, seq))
			return ret;
		dput(ret);
	}

repeat:
	/*
	 * Don't need rcu_dereference because we re-check it was correct under
	 * the lock.
	 */
	rcu_read_lock();
	ret = dentry->d_parent;
	spin_lock(&ret->d_lock);
	if (unlikely(ret != dentry->d_parent)) {
		spin_unlock(&ret->d_lock);
		rcu_read_unlock();
		goto repeat;
	}
	rcu_read_unlock();
	BUG_ON(!ret->d_lockref.count);
	ret->d_lockref.count++;
	spin_unlock(&ret->d_lock);
	return ret;
}
EXPORT_SYMBOL(dget_parent);
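
/*
 * Sketch of typical dget_parent() use (hypothetical caller): the value
 * returned is a counted reference, stable against concurrent rename, and
 * must be balanced with dput():
 *
 *	struct dentry *parent = dget_parent(dentry);
 *
 *	...use parent without fear of it being freed...
 *	dput(parent);
 */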

static struct dentry * __d_find_any_alias(struct inode *inode)
{
	struct dentry *alias;

	if (hlist_empty(&inode->i_dentry))
		return NULL;
	alias = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);
	__dget(alias);
	return alias;
}

/**
 * d_find_any_alias - find any alias for a given inode
 * @inode: inode to find an alias for
 *
 * If any aliases exist for the given inode, take and return a
 * reference for one of them.  If no aliases exist, return %NULL.
 */
struct dentry *d_find_any_alias(struct inode *inode)
{
	struct dentry *de;

	spin_lock(&inode->i_lock);
	de = __d_find_any_alias(inode);
	spin_unlock(&inode->i_lock);
	return de;
}
EXPORT_SYMBOL(d_find_any_alias);

/**
 * d_find_alias - grab a hashed alias of inode
 * @inode: inode in question
 *
 * If inode has a hashed alias, or is a directory and has any alias,
 * acquire the reference to alias and return it. Otherwise return NULL.
 * Notice that if inode is a directory there can be only one alias and
 * it can be unhashed only if it has no children, or if it is the root
 * of a filesystem, or if the directory was renamed and d_revalidate
 * was the first vfs operation to notice.
 *
 * If the inode has an IS_ROOT, DCACHE_DISCONNECTED alias, then prefer
 * any other hashed alias over that one.
 */
static struct dentry *__d_find_alias(struct inode *inode)
{
	struct dentry *alias;

	if (S_ISDIR(inode->i_mode))
		return __d_find_any_alias(inode);

	hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
		spin_lock(&alias->d_lock);
		if (!d_unhashed(alias)) {
			__dget_dlock(alias);
			spin_unlock(&alias->d_lock);
			return alias;
		}
		spin_unlock(&alias->d_lock);
	}
	return NULL;
}

struct dentry *d_find_alias(struct inode *inode)
{
	struct dentry *de = NULL;

	if (!hlist_empty(&inode->i_dentry)) {
		spin_lock(&inode->i_lock);
		de = __d_find_alias(inode);
		spin_unlock(&inode->i_lock);
	}
	return de;
}
EXPORT_SYMBOL(d_find_alias);
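
/*
 * Sketch of typical d_find_alias() use (hypothetical caller holding an
 * inode reference, as e.g. export or recovery code would):
 *
 *	struct dentry *alias = d_find_alias(inode);
 *
 *	if (alias) {
 *		...operate on the hashed alias...
 *		dput(alias);	// d_find_alias() took a reference
 *	}
 */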

/*
 *	Try to kill dentries associated with this inode.
 * WARNING: you must own a reference to inode.
 */
void d_prune_aliases(struct inode *inode)
{
	struct dentry *dentry;
restart:
	spin_lock(&inode->i_lock);
	hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) {
		spin_lock(&dentry->d_lock);
		if (!dentry->d_lockref.count) {
			struct dentry *parent = lock_parent(dentry);
			if (likely(!dentry->d_lockref.count)) {
				__dentry_kill(dentry);
				dput(parent);
				goto restart;
			}
			if (parent)
				spin_unlock(&parent->d_lock);
		}
		spin_unlock(&dentry->d_lock);
	}
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(d_prune_aliases);

/*
 * Lock a dentry from shrink list.
 * Called under rcu_read_lock() and dentry->d_lock; the former
 * guarantees that nothing we access will be freed under us.
 * Note that dentry is *not* protected from concurrent dentry_kill(),
 * d_delete(), etc.
 *
 * Return false if dentry has been disrupted or grabbed, leaving
 * the caller to kick it off-list.  Otherwise, return true and have
 * that dentry's inode and parent both locked.
 */
static bool shrink_lock_dentry(struct dentry *dentry)
{
	struct inode *inode;
	struct dentry *parent;

	if (dentry->d_lockref.count)
		return false;

	inode = dentry->d_inode;
	if (inode && unlikely(!spin_trylock(&inode->i_lock))) {
		spin_unlock(&dentry->d_lock);
		spin_lock(&inode->i_lock);
		spin_lock(&dentry->d_lock);
		if (unlikely(dentry->d_lockref.count))
			goto out;
		/* changed inode means that somebody had grabbed it */
		if (unlikely(inode != dentry->d_inode))
			goto out;
	}

	parent = dentry->d_parent;
	if (IS_ROOT(dentry) || likely(spin_trylock(&parent->d_lock)))
		return true;

	spin_unlock(&dentry->d_lock);
	spin_lock(&parent->d_lock);
	if (unlikely(parent != dentry->d_parent)) {
		spin_unlock(&parent->d_lock);
		spin_lock(&dentry->d_lock);
		goto out;
	}
	spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
	if (likely(!dentry->d_lockref.count))
		return true;
	spin_unlock(&parent->d_lock);
out:
	if (inode)
		spin_unlock(&inode->i_lock);
	return false;
}

void shrink_dentry_list(struct list_head *list)
{
	while (!list_empty(list)) {
		struct dentry *dentry, *parent;

		dentry = list_entry(list->prev, struct dentry, d_lru);
		spin_lock(&dentry->d_lock);
		rcu_read_lock();
		if (!shrink_lock_dentry(dentry)) {
			bool can_free = false;
			rcu_read_unlock();
			d_shrink_del(dentry);
			if (dentry->d_lockref.count < 0)
				can_free = dentry->d_flags & DCACHE_MAY_FREE;
			spin_unlock(&dentry->d_lock);
			if (can_free)
				dentry_free(dentry);
			continue;
		}
		rcu_read_unlock();
		d_shrink_del(dentry);
		parent = dentry->d_parent;
		if (parent != dentry)
			__dput_to_list(parent, list);
		__dentry_kill(dentry);
	}
}

static enum lru_status dentry_lru_isolate(struct list_head *item,
		struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
{
	struct list_head *freeable = arg;
	struct dentry	*dentry = container_of(item, struct dentry, d_lru);


	/*
	 * we are inverting the lru lock/dentry->d_lock here,
	 * so use a trylock. If we fail to get the lock, just skip
	 * it
	 */
	if (!spin_trylock(&dentry->d_lock))
		return LRU_SKIP;

	/*
	 * Referenced dentries are still in use. If they have active
	 * counts, just remove them from the LRU. Otherwise give them
	 * another pass through the LRU.
	 */
	if (dentry->d_lockref.count) {
		d_lru_isolate(lru, dentry);
		spin_unlock(&dentry->d_lock);
		return LRU_REMOVED;
	}

	if (dentry->d_flags & DCACHE_REFERENCED) {
		dentry->d_flags &= ~DCACHE_REFERENCED;
		spin_unlock(&dentry->d_lock);

		/*
		 * The list move itself will be made by the common LRU code. At
		 * this point, we've dropped the dentry->d_lock but keep the
		 * lru lock. This is safe to do, since every list movement is
		 * protected by the lru lock even if both locks are held.
		 *
		 * This is guaranteed by the fact that all LRU management
		 * functions are intermediated by the LRU API calls like
		 * list_lru_add and list_lru_del. List movement in this file
		 * only ever occurs through these functions or through
		 * callbacks like this one, which are called from the LRU API.
		 *
		 * The only exceptions to this are functions like
		 * shrink_dentry_list, and code that first checks for the
		 * DCACHE_SHRINK_LIST flag.  Those are guaranteed to be
		 * operating only with stack-provided lists after they are
		 * properly isolated from the main list.  It is thus always
		 * local access.
		 */
		return LRU_ROTATE;
	}

	d_lru_shrink_move(lru, dentry, freeable);
	spin_unlock(&dentry->d_lock);

	return LRU_REMOVED;
}

/**
 * prune_dcache_sb - shrink the dcache
 * @sb: superblock
 * @sc: shrink control, passed to list_lru_shrink_walk()
 *
 * Attempt to shrink the superblock dcache LRU by @sc->nr_to_scan entries. This
 * is done when we need more memory and called from the superblock shrinker
 * function.
 *
 * This function may fail to free any resources if all the dentries are in
 * use.
 */
long prune_dcache_sb(struct super_block *sb, struct shrink_control *sc)
{
	LIST_HEAD(dispose);
	long freed;

	freed = list_lru_shrink_walk(&sb->s_dentry_lru, sc,
				     dentry_lru_isolate, &dispose);
	shrink_dentry_list(&dispose);
	return freed;
}

static enum lru_status dentry_lru_isolate_shrink(struct list_head *item,
		struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
{
	struct list_head *freeable = arg;
	struct dentry	*dentry = container_of(item, struct dentry, d_lru);

	/*
	 * we are inverting the lru lock/dentry->d_lock here,
	 * so use a trylock. If we fail to get the lock, just skip
	 * it
	 */
	if (!spin_trylock(&dentry->d_lock))
		return LRU_SKIP;

	d_lru_shrink_move(lru, dentry, freeable);
	spin_unlock(&dentry->d_lock);

	return LRU_REMOVED;
}


/**
 * shrink_dcache_sb - shrink dcache for a superblock
 * @sb: superblock
 *
 * Shrink the dcache for the specified super block. This is used to free
 * the dcache before unmounting a file system.
 */
void shrink_dcache_sb(struct super_block *sb)
{
	do {
		LIST_HEAD(dispose);

		list_lru_walk(&sb->s_dentry_lru,
			dentry_lru_isolate_shrink, &dispose, 1024);
		shrink_dentry_list(&dispose);
	} while (list_lru_count(&sb->s_dentry_lru) > 0);
}
EXPORT_SYMBOL(shrink_dcache_sb);

/**
 * enum d_walk_ret - action to take during tree walk
 * @D_WALK_CONTINUE:	continue walk
 * @D_WALK_QUIT:	quit walk
 * @D_WALK_NORETRY:	quit when retry is needed
 * @D_WALK_SKIP:	skip this dentry and its children
 */
enum d_walk_ret {
	D_WALK_CONTINUE,
	D_WALK_QUIT,
	D_WALK_NORETRY,
	D_WALK_SKIP,
};

/**
 * d_walk - walk the dentry tree
 * @parent:	start of walk
 * @data:	data passed to @enter()
 * @enter:	callback when first entering the dentry
 *
 * The @enter() callbacks are called with d_lock held.
 */
static void d_walk(struct dentry *parent, void *data,
		   enum d_walk_ret (*enter)(void *, struct dentry *))
{
	struct dentry *this_parent;
	struct list_head *next;
	unsigned seq = 0;
	enum d_walk_ret ret;
	bool retry = true;

again:
	read_seqbegin_or_lock(&rename_lock, &seq);
	this_parent = parent;
	spin_lock(&this_parent->d_lock);

	ret = enter(data, this_parent);
	switch (ret) {
	case D_WALK_CONTINUE:
		break;
	case D_WALK_QUIT:
	case D_WALK_SKIP:
		goto out_unlock;
	case D_WALK_NORETRY:
		retry = false;
		break;
	}
repeat:
	next = this_parent->d_subdirs.next;
resume:
	while (next != &this_parent->d_subdirs) {
		struct list_head *tmp = next;
		struct dentry *dentry = list_entry(tmp, struct dentry, d_child);
		next = tmp->next;

		if (unlikely(dentry->d_flags & DCACHE_DENTRY_CURSOR))
			continue;

		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);

		ret = enter(data, dentry);
		switch (ret) {
		case D_WALK_CONTINUE:
			break;
		case D_WALK_QUIT:
			spin_unlock(&dentry->d_lock);
			goto out_unlock;
		case D_WALK_NORETRY:
			retry = false;
			break;
		case D_WALK_SKIP:
			spin_unlock(&dentry->d_lock);
			continue;
		}

		if (!list_empty(&dentry->d_subdirs)) {
			spin_unlock(&this_parent->d_lock);
			spin_release(&dentry->d_lock.dep_map, _RET_IP_);
			this_parent = dentry;
			spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
			goto repeat;
		}
		spin_unlock(&dentry->d_lock);
	}
	/*
	 * All done at this level ... ascend and resume the search.
	 */
	rcu_read_lock();
ascend:
	if (this_parent != parent) {
		struct dentry *child = this_parent;
		this_parent = child->d_parent;

		spin_unlock(&child->d_lock);
		spin_lock(&this_parent->d_lock);

		/* might go back up the wrong parent if we have had a rename. */
		if (need_seqretry(&rename_lock, seq))
			goto rename_retry;
		/* go into the first sibling still alive */
		do {
			next = child->d_child.next;
			if (next == &this_parent->d_subdirs)
				goto ascend;
			child = list_entry(next, struct dentry, d_child);
		} while (unlikely(child->d_flags & DCACHE_DENTRY_KILLED));
		rcu_read_unlock();
		goto resume;
	}
	if (need_seqretry(&rename_lock, seq))
		goto rename_retry;
	rcu_read_unlock();

out_unlock:
	spin_unlock(&this_parent->d_lock);
	done_seqretry(&rename_lock, seq);
	return;

rename_retry:
	spin_unlock(&this_parent->d_lock);
	rcu_read_unlock();
	BUG_ON(seq & 1);
	if (!retry)
		return;
	seq = 1;
	goto again;
}
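
/*
 * Minimal illustration of an @enter callback (hypothetical; the real
 * callbacks below, such as path_check_mount(), follow the same shape).
 * It runs with the dentry's d_lock held and steers the walk through its
 * return value:
 *
 *	static enum d_walk_ret example_count_one(void *_data,
 *						 struct dentry *dentry)
 *	{
 *		unsigned long *count = _data;
 *
 *		(*count)++;
 *		return D_WALK_CONTINUE;
 *	}
 *
 *	// usage: unsigned long n = 0; d_walk(root, &n, example_count_one);
 */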

struct check_mount {
	struct vfsmount *mnt;
	unsigned int mounted;
};

static enum d_walk_ret path_check_mount(void *data, struct dentry *dentry)
{
	struct check_mount *info = data;
	struct path path = { .mnt = info->mnt, .dentry = dentry };

	if (likely(!d_mountpoint(dentry)))
		return D_WALK_CONTINUE;
	if (__path_is_mountpoint(&path)) {
		info->mounted = 1;
		return D_WALK_QUIT;
	}
	return D_WALK_CONTINUE;
}

/**
 * path_has_submounts - check for mounts over a dentry in the
 *                      current namespace.
 * @parent: path to check.
 *
 * Return true if the parent or its subdirectories contain
 * a mount point in the current namespace.
 */
int path_has_submounts(const struct path *parent)
{
	struct check_mount data = { .mnt = parent->mnt, .mounted = 0 };

	read_seqlock_excl(&mount_lock);
	d_walk(parent->dentry, &data, path_check_mount);
	read_sequnlock_excl(&mount_lock);

	return data.mounted;
}
EXPORT_SYMBOL(path_has_submounts);

/*
 * Called by mount code to set a mountpoint and check if the mountpoint is
 * reachable (e.g. NFS can unhash a directory dentry and then the complete
 * subtree can become unreachable).
 *
 * Only one of d_invalidate() and d_set_mounted() must succeed.  For
 * this reason take rename_lock and d_lock on dentry and ancestors.
 */
int d_set_mounted(struct dentry *dentry)
{
	struct dentry *p;
	int ret = -ENOENT;
	write_seqlock(&rename_lock);
	for (p = dentry->d_parent; !IS_ROOT(p); p = p->d_parent) {
		/* Need exclusion wrt. d_invalidate() */
		spin_lock(&p->d_lock);
		if (unlikely(d_unhashed(p))) {
			spin_unlock(&p->d_lock);
			goto out;
		}
		spin_unlock(&p->d_lock);
	}
	spin_lock(&dentry->d_lock);
	if (!d_unlinked(dentry)) {
		ret = -EBUSY;
		if (!d_mountpoint(dentry)) {
			dentry->d_flags |= DCACHE_MOUNTED;
			ret = 0;
		}
	}
	spin_unlock(&dentry->d_lock);
out:
	write_sequnlock(&rename_lock);
	return ret;
}

/*
 * Search the dentry child list of the specified parent,
 * and move any unused dentries to the end of the unused
 * list for prune_dcache(). We descend to the next level
 * whenever the d_subdirs list is non-empty and continue
 * searching.
 *
 * It returns zero iff there are no unused children,
 * otherwise it returns the number of children moved to
 * the end of the unused list. This may not be the total
 * number of unused children, because select_collect() can
 * drop the lock and return early due to latency
 * constraints.
 */

struct select_data {
	struct dentry *start;
	union {
		long found;
		struct dentry *victim;
	};
	struct list_head dispose;
};

static enum d_walk_ret select_collect(void *_data, struct dentry *dentry)
{
	struct select_data *data = _data;
	enum d_walk_ret ret = D_WALK_CONTINUE;

	if (data->start == dentry)
		goto out;

	if (dentry->d_flags & DCACHE_SHRINK_LIST) {
		data->found++;
	} else {
		if (dentry->d_flags & DCACHE_LRU_LIST)
			d_lru_del(dentry);
		if (!dentry->d_lockref.count) {
			d_shrink_add(dentry, &data->dispose);
			data->found++;
		}
	}
	/*
	 * We can return to the caller if we have found some (this
	 * ensures forward progress). We'll be coming back to find
	 * the rest.
	 */
	if (!list_empty(&data->dispose))
		ret = need_resched() ? D_WALK_QUIT : D_WALK_NORETRY;
out:
	return ret;
}

static enum d_walk_ret select_collect2(void *_data, struct dentry *dentry)
{
	struct select_data *data = _data;
	enum d_walk_ret ret = D_WALK_CONTINUE;

	if (data->start == dentry)
		goto out;

	if (dentry->d_flags & DCACHE_SHRINK_LIST) {
		if (!dentry->d_lockref.count) {
			rcu_read_lock();
			data->victim = dentry;
			return D_WALK_QUIT;
		}
	} else {
		if (dentry->d_flags & DCACHE_LRU_LIST)
			d_lru_del(dentry);
		if (!dentry->d_lockref.count)
			d_shrink_add(dentry, &data->dispose);
	}
	/*
	 * We can return to the caller if we have found some (this
	 * ensures forward progress). We'll be coming back to find
	 * the rest.
	 */
	if (!list_empty(&data->dispose))
		ret = need_resched() ? D_WALK_QUIT : D_WALK_NORETRY;
out:
	return ret;
}

/**
 * shrink_dcache_parent - prune dcache
 * @parent: parent of entries to prune
 *
 * Prune the dcache to remove unused children of the parent dentry.
 */
void shrink_dcache_parent(struct dentry *parent)
{
	for (;;) {
		struct select_data data = {.start = parent};

		INIT_LIST_HEAD(&data.dispose);
		d_walk(parent, &data, select_collect);

		if (!list_empty(&data.dispose)) {
			shrink_dentry_list(&data.dispose);
			continue;
		}

		cond_resched();
		if (!data.found)
			break;
		data.victim = NULL;
		d_walk(parent, &data, select_collect2);
		if (data.victim) {
			struct dentry *parent;
			spin_lock(&data.victim->d_lock);
			if (!shrink_lock_dentry(data.victim)) {
				spin_unlock(&data.victim->d_lock);
				rcu_read_unlock();
			} else {
				rcu_read_unlock();
				parent = data.victim->d_parent;
				if (parent != data.victim)
					__dput_to_list(parent, &data.dispose);
				__dentry_kill(data.victim);
			}
		}
		if (!list_empty(&data.dispose))
			shrink_dentry_list(&data.dispose);
	}
}
EXPORT_SYMBOL(shrink_dcache_parent);

static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
{
1580         /* it has busy descendants; complain about those instead */
1581         if (!list_empty(&dentry->d_subdirs))
1582                 return D_WALK_CONTINUE;
1583
1584         /* root with refcount 1 is fine */
1585         if (dentry == _data && dentry->d_lockref.count == 1)
1586                 return D_WALK_CONTINUE;
1587
1588         printk(KERN_ERR "BUG: Dentry %p{i=%lx,n=%pd}"
1589                         " still in use (%d) [unmount of %s %s]\n",
1590                        dentry,
1591                        dentry->d_inode ?
1592                        dentry->d_inode->i_ino : 0UL,
1593                        dentry,
1594                        dentry->d_lockref.count,
1595                        dentry->d_sb->s_type->name,
1596                        dentry->d_sb->s_id);
1597         WARN_ON(1);
1598         return D_WALK_CONTINUE;
1599 }
1600
1601 static void do_one_tree(struct dentry *dentry)
1602 {
1603         shrink_dcache_parent(dentry);
1604         d_walk(dentry, dentry, umount_check);
1605         d_drop(dentry);
1606         dput(dentry);
1607 }
1608
1609 /*
1610  * destroy the dentries attached to a superblock on unmounting
1611  */
1612 void shrink_dcache_for_umount(struct super_block *sb)
1613 {
1614         struct dentry *dentry;
1615
1616         WARN(down_read_trylock(&sb->s_umount), "s_umount should've been locked");
1617
1618         dentry = sb->s_root;
1619         sb->s_root = NULL;
1620         do_one_tree(dentry);
1621
1622         while (!hlist_bl_empty(&sb->s_roots)) {
1623                 dentry = dget(hlist_bl_entry(hlist_bl_first(&sb->s_roots), struct dentry, d_hash));
1624                 do_one_tree(dentry);
1625         }
1626 }
1627
1628 static enum d_walk_ret find_submount(void *_data, struct dentry *dentry)
1629 {
1630         struct dentry **victim = _data;
1631         if (d_mountpoint(dentry)) {
1632                 __dget_dlock(dentry);
1633                 *victim = dentry;
1634                 return D_WALK_QUIT;
1635         }
1636         return D_WALK_CONTINUE;
1637 }
1638
1639 /**
1640  * d_invalidate - detach submounts, prune dcache, and drop
1641  * @dentry: dentry to invalidate (aka detach, prune and drop)
1642  */
1643 void d_invalidate(struct dentry *dentry)
1644 {
1645         bool had_submounts = false;
1646         spin_lock(&dentry->d_lock);
1647         if (d_unhashed(dentry)) {
1648                 spin_unlock(&dentry->d_lock);
1649                 return;
1650         }
1651         __d_drop(dentry);
1652         spin_unlock(&dentry->d_lock);
1653
1654         /* Negative dentries can be dropped without further checks */
1655         if (!dentry->d_inode)
1656                 return;
1657
1658         shrink_dcache_parent(dentry);
1659         for (;;) {
1660                 struct dentry *victim = NULL;
1661                 d_walk(dentry, &victim, find_submount);
1662                 if (!victim) {
1663                         if (had_submounts)
1664                                 shrink_dcache_parent(dentry);
1665                         return;
1666                 }
1667                 had_submounts = true;
1668                 detach_mounts(victim);
1669                 dput(victim);
1670         }
1671 }
1672 EXPORT_SYMBOL(d_invalidate);
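
/*
 * Editorial example (not part of the original source): a network
 * filesystem that discovers out of band that a name went away can
 * drop the whole subtree with d_invalidate(). The helper name is
 * hypothetical:
 *
 *	if (examplefs_dentry_is_stale(dentry))
 *		d_invalidate(dentry);
 */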
1673
1674 /**
1675  * __d_alloc    -       allocate a dcache entry
1676  * @sb: filesystem it will belong to
1677  * @name: qstr of the name
1678  *
1679  * Allocates a dentry. It returns %NULL if there is insufficient memory
1680  * available. On success the dentry is returned. The name passed in is
1681  * copied, so the caller's copy may be reused after this call.
1682  */
1683  
1684 static struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
1685 {
1686         struct dentry *dentry;
1687         char *dname;
1688         int err;
1689
1690         dentry = kmem_cache_alloc(dentry_cache, GFP_KERNEL);
1691         if (!dentry)
1692                 return NULL;
1693
1694         /*
1695          * We guarantee that the inline name is always NUL-terminated.
1696          * This way the memcpy() done by the name switching in rename
1697          * will still always have a NUL at the end, even if we might
1698          * be overwriting an internal NUL character
1699          */
1700         dentry->d_iname[DNAME_INLINE_LEN-1] = 0;
1701         if (unlikely(!name)) {
1702                 name = &slash_name;
1703                 dname = dentry->d_iname;
1704         } else if (name->len > DNAME_INLINE_LEN-1) {
1705                 size_t size = offsetof(struct external_name, name[1]);
1706                 struct external_name *p = kmalloc(size + name->len,
1707                                                   GFP_KERNEL_ACCOUNT |
1708                                                   __GFP_RECLAIMABLE);
1709                 if (!p) {
1710                         kmem_cache_free(dentry_cache, dentry); 
1711                         return NULL;
1712                 }
1713                 atomic_set(&p->u.count, 1);
1714                 dname = p->name;
1715         } else  {
1716                 dname = dentry->d_iname;
1717         }       
1718
1719         dentry->d_name.len = name->len;
1720         dentry->d_name.hash = name->hash;
1721         memcpy(dname, name->name, name->len);
1722         dname[name->len] = 0;
1723
1724         /* Make sure we always see the terminating NUL character */
1725         smp_store_release(&dentry->d_name.name, dname); /* ^^^ */
1726
1727         dentry->d_lockref.count = 1;
1728         dentry->d_flags = 0;
1729         spin_lock_init(&dentry->d_lock);
1730         seqcount_init(&dentry->d_seq);
1731         dentry->d_inode = NULL;
1732         dentry->d_parent = dentry;
1733         dentry->d_sb = sb;
1734         dentry->d_op = NULL;
1735         dentry->d_fsdata = NULL;
1736         INIT_HLIST_BL_NODE(&dentry->d_hash);
1737         INIT_LIST_HEAD(&dentry->d_lru);
1738         INIT_LIST_HEAD(&dentry->d_subdirs);
1739         INIT_HLIST_NODE(&dentry->d_u.d_alias);
1740         INIT_LIST_HEAD(&dentry->d_child);
1741         d_set_d_op(dentry, dentry->d_sb->s_d_op);
1742
1743         if (dentry->d_op && dentry->d_op->d_init) {
1744                 err = dentry->d_op->d_init(dentry);
1745                 if (err) {
1746                         if (dname_external(dentry))
1747                                 kfree(external_name(dentry));
1748                         kmem_cache_free(dentry_cache, dentry);
1749                         return NULL;
1750                 }
1751         }
1752
1753         this_cpu_inc(nr_dentry);
1754
1755         return dentry;
1756 }
1757
1758 /**
1759  * d_alloc      -       allocate a dcache entry
1760  * @parent: parent of entry to allocate
1761  * @name: qstr of the name
1762  *
1763  * Allocates a dentry. It returns %NULL if there is insufficient memory
1764  * available. On success the dentry is returned. The name passed in is
1765  * copied, so the caller's copy may be reused after this call.
1766  */
1767 struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
1768 {
1769         struct dentry *dentry = __d_alloc(parent->d_sb, name);
1770         if (!dentry)
1771                 return NULL;
1772         spin_lock(&parent->d_lock);
1773         /*
1774          * don't need child lock because it is not subject
1775          * to concurrency here
1776          */
1777         __dget_dlock(parent);
1778         dentry->d_parent = parent;
1779         list_add(&dentry->d_child, &parent->d_subdirs);
1780         spin_unlock(&parent->d_lock);
1781
1782         return dentry;
1783 }
1784 EXPORT_SYMBOL(d_alloc);
1785
1786 struct dentry *d_alloc_anon(struct super_block *sb)
1787 {
1788         return __d_alloc(sb, NULL);
1789 }
1790 EXPORT_SYMBOL(d_alloc_anon);
1791
1792 struct dentry *d_alloc_cursor(struct dentry * parent)
1793 {
1794         struct dentry *dentry = d_alloc_anon(parent->d_sb);
1795         if (dentry) {
1796                 dentry->d_flags |= DCACHE_DENTRY_CURSOR;
1797                 dentry->d_parent = dget(parent);
1798         }
1799         return dentry;
1800 }
1801
1802 /**
1803  * d_alloc_pseudo - allocate a dentry (for lookup-less filesystems)
1804  * @sb: the superblock
1805  * @name: qstr of the name
1806  *
1807  * For a filesystem that just pins its dentries in memory and never
1808  * performs lookups at all, return an unhashed IS_ROOT dentry.
1809  * This is used for pipes, sockets et al. - the stuff that should
1810  * never be anyone's children or parents.  Unlike all other
1811  * dentries, these will not have RCU delay between dropping the
1812  * last reference and freeing them.
1813  *
1814  * The only user is alloc_file_pseudo() and that's what should
1815  * be considered a public interface.  Don't use directly.
1816  */
1817 struct dentry *d_alloc_pseudo(struct super_block *sb, const struct qstr *name)
1818 {
1819         struct dentry *dentry = __d_alloc(sb, name);
1820         if (likely(dentry))
1821                 dentry->d_flags |= DCACHE_NORCU;
1822         return dentry;
1823 }
1824
1825 struct dentry *d_alloc_name(struct dentry *parent, const char *name)
1826 {
1827         struct qstr q;
1828
1829         q.name = name;
1830         q.hash_len = hashlen_string(parent, name);
1831         return d_alloc(parent, &q);
1832 }
1833 EXPORT_SYMBOL(d_alloc_name);
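
/*
 * Editorial example: a pseudo-filesystem building its tree at mount
 * time might allocate and publish a named child like this; the inode
 * helper is hypothetical:
 *
 *	struct dentry *child = d_alloc_name(parent, "status");
 *
 *	if (!child)
 *		return -ENOMEM;
 *	d_add(child, examplefs_make_inode(parent->d_sb, S_IFREG | 0444));
 */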
1834
1835 void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op)
1836 {
1837         WARN_ON_ONCE(dentry->d_op);
1838         WARN_ON_ONCE(dentry->d_flags & (DCACHE_OP_HASH  |
1839                                 DCACHE_OP_COMPARE       |
1840                                 DCACHE_OP_REVALIDATE    |
1841                                 DCACHE_OP_WEAK_REVALIDATE       |
1842                                 DCACHE_OP_DELETE        |
1843                                 DCACHE_OP_REAL));
1844         dentry->d_op = op;
1845         if (!op)
1846                 return;
1847         if (op->d_hash)
1848                 dentry->d_flags |= DCACHE_OP_HASH;
1849         if (op->d_compare)
1850                 dentry->d_flags |= DCACHE_OP_COMPARE;
1851         if (op->d_revalidate)
1852                 dentry->d_flags |= DCACHE_OP_REVALIDATE;
1853         if (op->d_weak_revalidate)
1854                 dentry->d_flags |= DCACHE_OP_WEAK_REVALIDATE;
1855         if (op->d_delete)
1856                 dentry->d_flags |= DCACHE_OP_DELETE;
1857         if (op->d_prune)
1858                 dentry->d_flags |= DCACHE_OP_PRUNE;
1859         if (op->d_real)
1860                 dentry->d_flags |= DCACHE_OP_REAL;
1861
1862 }
1863 EXPORT_SYMBOL(d_set_d_op);
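
/*
 * Editorial example: filesystems rarely call d_set_d_op() directly;
 * the usual pattern is to set sb->s_d_op in fill_super() and let
 * __d_alloc() apply it to every new dentry (see the call above).
 * "examplefs" is hypothetical; always_delete_dentry() is the stock
 * helper from fs/libfs.c:
 *
 *	static const struct dentry_operations examplefs_dentry_ops = {
 *		.d_delete = always_delete_dentry,
 *	};
 *
 *	sb->s_d_op = &examplefs_dentry_ops;
 */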
1864
1865
1866 /**
1867  * d_set_fallthru - Mark a dentry as falling through to a lower layer
1868  * @dentry: The dentry to mark
1869  *
1870  * Mark a dentry as falling through to the lower layer (as set with
1871  * d_pin_lower()).  This flag may be recorded on the medium.
1872  */
1873 void d_set_fallthru(struct dentry *dentry)
1874 {
1875         spin_lock(&dentry->d_lock);
1876         dentry->d_flags |= DCACHE_FALLTHRU;
1877         spin_unlock(&dentry->d_lock);
1878 }
1879 EXPORT_SYMBOL(d_set_fallthru);
1880
1881 static unsigned d_flags_for_inode(struct inode *inode)
1882 {
1883         unsigned add_flags = DCACHE_REGULAR_TYPE;
1884
1885         if (!inode)
1886                 return DCACHE_MISS_TYPE;
1887
1888         if (S_ISDIR(inode->i_mode)) {
1889                 add_flags = DCACHE_DIRECTORY_TYPE;
1890                 if (unlikely(!(inode->i_opflags & IOP_LOOKUP))) {
1891                         if (unlikely(!inode->i_op->lookup))
1892                                 add_flags = DCACHE_AUTODIR_TYPE;
1893                         else
1894                                 inode->i_opflags |= IOP_LOOKUP;
1895                 }
1896                 goto type_determined;
1897         }
1898
1899         if (unlikely(!(inode->i_opflags & IOP_NOFOLLOW))) {
1900                 if (unlikely(inode->i_op->get_link)) {
1901                         add_flags = DCACHE_SYMLINK_TYPE;
1902                         goto type_determined;
1903                 }
1904                 inode->i_opflags |= IOP_NOFOLLOW;
1905         }
1906
1907         if (unlikely(!S_ISREG(inode->i_mode)))
1908                 add_flags = DCACHE_SPECIAL_TYPE;
1909
1910 type_determined:
1911         if (unlikely(IS_AUTOMOUNT(inode)))
1912                 add_flags |= DCACHE_NEED_AUTOMOUNT;
1913         return add_flags;
1914 }
1915
1916 static void __d_instantiate(struct dentry *dentry, struct inode *inode)
1917 {
1918         unsigned add_flags = d_flags_for_inode(inode);
1919         WARN_ON(d_in_lookup(dentry));
1920
1921         spin_lock(&dentry->d_lock);
1922         /*
1923          * Decrement negative dentry count if it was in the LRU list.
1924          */
1925         if (dentry->d_flags & DCACHE_LRU_LIST)
1926                 this_cpu_dec(nr_dentry_negative);
1927         hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
1928         raw_write_seqcount_begin(&dentry->d_seq);
1929         __d_set_inode_and_type(dentry, inode, add_flags);
1930         raw_write_seqcount_end(&dentry->d_seq);
1931         fsnotify_update_flags(dentry);
1932         spin_unlock(&dentry->d_lock);
1933 }
1934
1935 /**
1936  * d_instantiate - fill in inode information for a dentry
1937  * @entry: dentry to complete
1938  * @inode: inode to attach to this dentry
1939  *
1940  * Fill in inode information in the entry.
1941  *
1942  * This turns negative dentries into productive full members
1943  * of society.
1944  *
1945  * NOTE! This assumes that the inode count has been incremented
1946  * (or otherwise set) by the caller to indicate that it is now
1947  * in use by the dcache.
1948  */
1949  
1950 void d_instantiate(struct dentry *entry, struct inode * inode)
1951 {
1952         BUG_ON(!hlist_unhashed(&entry->d_u.d_alias));
1953         if (inode) {
1954                 security_d_instantiate(entry, inode);
1955                 spin_lock(&inode->i_lock);
1956                 __d_instantiate(entry, inode);
1957                 spin_unlock(&inode->i_lock);
1958         }
1959 }
1960 EXPORT_SYMBOL(d_instantiate);
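
/*
 * Editorial example: the tail of a typical ->create() method. As the
 * note above says, the new inode's reference is handed over to the
 * dcache. examplefs_new_inode() is hypothetical:
 *
 *	inode = examplefs_new_inode(dir->i_sb, mode);
 *	if (IS_ERR(inode))
 *		return PTR_ERR(inode);
 *	d_instantiate(dentry, inode);
 *	return 0;
 */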
1961
1962 /*
1963  * This should be equivalent to d_instantiate() + unlock_new_inode(),
1964  * with lockdep-related part of unlock_new_inode() done before
1965  * anything else.  Use that instead of open-coding d_instantiate()/
1966  * unlock_new_inode() combinations.
1967  */
1968 void d_instantiate_new(struct dentry *entry, struct inode *inode)
1969 {
1970         BUG_ON(!hlist_unhashed(&entry->d_u.d_alias));
1971         BUG_ON(!inode);
1972         lockdep_annotate_inode_mutex_key(inode);
1973         security_d_instantiate(entry, inode);
1974         spin_lock(&inode->i_lock);
1975         __d_instantiate(entry, inode);
1976         WARN_ON(!(inode->i_state & I_NEW));
1977         inode->i_state &= ~I_NEW & ~I_CREATING;
1978         smp_mb();
1979         wake_up_bit(&inode->i_state, __I_NEW);
1980         spin_unlock(&inode->i_lock);
1981 }
1982 EXPORT_SYMBOL(d_instantiate_new);
1983
1984 struct dentry *d_make_root(struct inode *root_inode)
1985 {
1986         struct dentry *res = NULL;
1987
1988         if (root_inode) {
1989                 res = d_alloc_anon(root_inode->i_sb);
1990                 if (res)
1991                         d_instantiate(res, root_inode);
1992                 else
1993                         iput(root_inode);
1994         }
1995         return res;
1996 }
1997 EXPORT_SYMBOL(d_make_root);
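
/*
 * Editorial example: the usual fill_super() tail. d_make_root()
 * consumes the inode reference even on failure, so the error path
 * needs no iput(). examplefs_iget() and the root ino are hypothetical:
 *
 *	root = examplefs_iget(sb, EXAMPLEFS_ROOT_INO);
 *	if (IS_ERR(root))
 *		return PTR_ERR(root);
 *	sb->s_root = d_make_root(root);
 *	if (!sb->s_root)
 *		return -ENOMEM;
 *	return 0;
 */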
1998
1999 static struct dentry *__d_instantiate_anon(struct dentry *dentry,
2000                                            struct inode *inode,
2001                                            bool disconnected)
2002 {
2003         struct dentry *res;
2004         unsigned add_flags;
2005
2006         security_d_instantiate(dentry, inode);
2007         spin_lock(&inode->i_lock);
2008         res = __d_find_any_alias(inode);
2009         if (res) {
2010                 spin_unlock(&inode->i_lock);
2011                 dput(dentry);
2012                 goto out_iput;
2013         }
2014
2015         /* attach a disconnected dentry */
2016         add_flags = d_flags_for_inode(inode);
2017
2018         if (disconnected)
2019                 add_flags |= DCACHE_DISCONNECTED;
2020
2021         spin_lock(&dentry->d_lock);
2022         __d_set_inode_and_type(dentry, inode, add_flags);
2023         hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
2024         if (!disconnected) {
2025                 hlist_bl_lock(&dentry->d_sb->s_roots);
2026                 hlist_bl_add_head(&dentry->d_hash, &dentry->d_sb->s_roots);
2027                 hlist_bl_unlock(&dentry->d_sb->s_roots);
2028         }
2029         spin_unlock(&dentry->d_lock);
2030         spin_unlock(&inode->i_lock);
2031
2032         return dentry;
2033
2034  out_iput:
2035         iput(inode);
2036         return res;
2037 }
2038
2039 struct dentry *d_instantiate_anon(struct dentry *dentry, struct inode *inode)
2040 {
2041         return __d_instantiate_anon(dentry, inode, true);
2042 }
2043 EXPORT_SYMBOL(d_instantiate_anon);
2044
2045 static struct dentry *__d_obtain_alias(struct inode *inode, bool disconnected)
2046 {
2047         struct dentry *tmp;
2048         struct dentry *res;
2049
2050         if (!inode)
2051                 return ERR_PTR(-ESTALE);
2052         if (IS_ERR(inode))
2053                 return ERR_CAST(inode);
2054
2055         res = d_find_any_alias(inode);
2056         if (res)
2057                 goto out_iput;
2058
2059         tmp = d_alloc_anon(inode->i_sb);
2060         if (!tmp) {
2061                 res = ERR_PTR(-ENOMEM);
2062                 goto out_iput;
2063         }
2064
2065         return __d_instantiate_anon(tmp, inode, disconnected);
2066
2067 out_iput:
2068         iput(inode);
2069         return res;
2070 }
2071
2072 /**
2073  * d_obtain_alias - find or allocate a DISCONNECTED dentry for a given inode
2074  * @inode: inode to allocate the dentry for
2075  *
2076  * Obtain a dentry for an inode resulting from NFS filehandle conversion or
2077  * similar open by handle operations.  The returned dentry may be anonymous,
2078  * or may have a full name (if the inode was already in the cache).
2079  *
2080  * When called on a directory inode, we must ensure that the inode only ever
2081  * has one dentry.  If a dentry is found, that is returned instead of
2082  * allocating a new one.
2083  *
2084  * On successful return, the reference to the inode has been transferred
2085  * to the dentry.  In case of an error the reference on the inode is released.
2086  * To make it easier to use in export operations a %NULL or IS_ERR inode may
2087  * be passed in and the error will be propagated to the return value,
2088  * with a %NULL @inode replaced by ERR_PTR(-ESTALE).
2089  */
2090 struct dentry *d_obtain_alias(struct inode *inode)
2091 {
2092         return __d_obtain_alias(inode, true);
2093 }
2094 EXPORT_SYMBOL(d_obtain_alias);
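
/*
 * Editorial example: the core of an export_operations ->fh_to_dentry()
 * method. Because d_obtain_alias() accepts and propagates NULL/IS_ERR
 * inodes, the error handling collapses to one line; examplefs_iget()
 * is hypothetical:
 *
 *	static struct dentry *examplefs_fh_to_dentry(struct super_block *sb,
 *			struct fid *fid, int fh_len, int fh_type)
 *	{
 *		return d_obtain_alias(examplefs_iget(sb, fid->i32.ino));
 *	}
 */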
2095
2096 /**
2097  * d_obtain_root - find or allocate a dentry for a given inode
2098  * @inode: inode to allocate the dentry for
2099  *
2100  * Obtain an IS_ROOT dentry for the root of a filesystem.
2101  *
2102  * We must ensure that directory inodes only ever have one dentry.  If a
2103  * dentry is found, that is returned instead of allocating a new one.
2104  *
2105  * On successful return, the reference to the inode has been transferred
2106  * to the dentry.  In case of an error the reference on the inode is
2107  * released.  A %NULL or IS_ERR inode may be passed in and the error
2108  * will be propagated to the return value, with a %NULL @inode
2109  * replaced by ERR_PTR(-ESTALE).
2110  */
2111 struct dentry *d_obtain_root(struct inode *inode)
2112 {
2113         return __d_obtain_alias(inode, false);
2114 }
2115 EXPORT_SYMBOL(d_obtain_root);
2116
2117 /**
2118  * d_add_ci - lookup or allocate new dentry with case-exact name
2119  * @inode:  the inode case-insensitive lookup has found
2120  * @dentry: the negative dentry that was passed to the parent's lookup func
2121  * @name:   the case-exact name to be associated with the returned dentry
2122  *
2123  * This is to avoid filling the dcache with case-insensitive names to the
2124  * same inode; only the actual correct case is stored in the dcache for
2125  * case-insensitive filesystems.
2126  *
2127  * For a case-insensitive lookup match, if the case-exact dentry
2128  * already exists in the dcache, use it and return it.
2129  *
2130  * If no entry exists with the exact case name, allocate new dentry with
2131  * the exact case, and return the spliced entry.
2132  */
2133 struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode,
2134                         struct qstr *name)
2135 {
2136         struct dentry *found, *res;
2137
2138         /*
2139          * First check if a dentry matching the name already exists,
2140          * if not go ahead and create it now.
2141          */
2142         found = d_hash_and_lookup(dentry->d_parent, name);
2143         if (found) {
2144                 iput(inode);
2145                 return found;
2146         }
2147         if (d_in_lookup(dentry)) {
2148                 found = d_alloc_parallel(dentry->d_parent, name,
2149                                         dentry->d_wait);
2150                 if (IS_ERR(found) || !d_in_lookup(found)) {
2151                         iput(inode);
2152                         return found;
2153                 }
2154         } else {
2155                 found = d_alloc(dentry->d_parent, name);
2156                 if (!found) {
2157                         iput(inode);
2158                         return ERR_PTR(-ENOMEM);
2159                 } 
2160         }
2161         res = d_splice_alias(inode, found);
2162         if (res) {
2163                 dput(found);
2164                 return res;
2165         }
2166         return found;
2167 }
2168 EXPORT_SYMBOL(d_add_ci);
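
/*
 * Editorial example: a case-insensitive filesystem's ->lookup() after
 * an on-disk scan matched under a different case. The directory-scan
 * helper, disk_name and ino are hypothetical:
 *
 *	err = examplefs_find_entry(dir, &dentry->d_name, &disk_name, &ino);
 *	if (err)
 *		return ERR_PTR(err);
 *	inode = examplefs_iget(dir->i_sb, ino);
 *	return d_add_ci(dentry, inode, &disk_name);
 */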
2169
2170
2171 static inline bool d_same_name(const struct dentry *dentry,
2172                                 const struct dentry *parent,
2173                                 const struct qstr *name)
2174 {
2175         if (likely(!(parent->d_flags & DCACHE_OP_COMPARE))) {
2176                 if (dentry->d_name.len != name->len)
2177                         return false;
2178                 return dentry_cmp(dentry, name->name, name->len) == 0;
2179         }
2180         return parent->d_op->d_compare(dentry,
2181                                        dentry->d_name.len, dentry->d_name.name,
2182                                        name) == 0;
2183 }
2184
2185 /**
2186  * __d_lookup_rcu - search for a dentry (racy, store-free)
2187  * @parent: parent dentry
2188  * @name: qstr of name we wish to find
2189  * @seqp: returns d_seq value at the point where the dentry was found
2190  * Returns: dentry, or NULL
2191  *
2192  * __d_lookup_rcu is the dcache lookup function for rcu-walk name
2193  * resolution (store-free path walking) design described in
2194  * Documentation/filesystems/path-lookup.txt.
2195  *
2196  * This is not to be used outside core vfs.
2197  *
2198  * __d_lookup_rcu must only be used in rcu-walk mode, i.e. with vfsmount lock
2199  * held, and rcu_read_lock held. The returned dentry must not be stored
2200  * anywhere without taking d_lock and checking the d_seq sequence count
2201  * against @seq returned here.
2202  *
2203  * A refcount may be taken on the found dentry with the d_rcu_to_refcount
2204  * function.
2205  *
2206  * Alternatively, __d_lookup_rcu may be called again to look up the child of
2207  * the returned dentry, so long as its parent's seqlock is checked after the
2208  * child is looked up. Thus, an interlocking stepping of sequence lock checks
2209  * is formed, giving integrity down the path walk.
2210  *
2211  * NOTE! The caller *has* to check the resulting dentry against the sequence
2212  * number we've returned before using any of the resulting dentry state!
2213  */
2214 struct dentry *__d_lookup_rcu(const struct dentry *parent,
2215                                 const struct qstr *name,
2216                                 unsigned *seqp)
2217 {
2218         u64 hashlen = name->hash_len;
2219         const unsigned char *str = name->name;
2220         struct hlist_bl_head *b = d_hash(hashlen_hash(hashlen));
2221         struct hlist_bl_node *node;
2222         struct dentry *dentry;
2223
2224         /*
2225          * Note: There is significant duplication with __d_lookup, which is
2226          * required to prevent single-threaded performance regressions,
2227          * especially on architectures where smp_rmb() (in seqcounts) is costly.
2228          * Keep the two functions in sync.
2229          */
2230
2231         /*
2232          * The hash list is protected using RCU.
2233          *
2234          * Carefully use d_seq when comparing a candidate dentry, to avoid
2235          * races with d_move().
2236          *
2237          * It is possible that concurrent renames can mess up our list
2238          * walk here and result in missing our dentry, resulting in the
2239          * false-negative result. d_lookup() protects against concurrent
2240          * renames using rename_lock seqlock.
2241          *
2242          * See Documentation/filesystems/path-lookup.txt for more details.
2243          */
2244         hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
2245                 unsigned seq;
2246
2247 seqretry:
2248                 /*
2249                  * The dentry sequence count protects us from concurrent
2250                  * renames, and thus protects parent and name fields.
2251                  *
2252                  * The caller must perform a seqcount check in order
2253                  * to do anything useful with the returned dentry.
2254                  *
2255                  * NOTE! We do a "raw" seqcount_begin here. That means that
2256                  * we don't wait for the sequence count to stabilize if it
2257                  * is in the middle of a sequence change. If we do the slow
2258                  * dentry compare, we will do seqretries until it is stable,
2259                  * and if we end up with a successful lookup, we actually
2260                  * want to exit RCU lookup anyway.
2261                  *
2262                  * Note that raw_seqcount_begin still *does* smp_rmb(), so
2263                  * we are still guaranteed NUL-termination of ->d_name.name.
2264                  */
2265                 seq = raw_seqcount_begin(&dentry->d_seq);
2266                 if (dentry->d_parent != parent)
2267                         continue;
2268                 if (d_unhashed(dentry))
2269                         continue;
2270
2271                 if (unlikely(parent->d_flags & DCACHE_OP_COMPARE)) {
2272                         int tlen;
2273                         const char *tname;
2274                         if (dentry->d_name.hash != hashlen_hash(hashlen))
2275                                 continue;
2276                         tlen = dentry->d_name.len;
2277                         tname = dentry->d_name.name;
2278                         /* we want a consistent (name,len) pair */
2279                         if (read_seqcount_retry(&dentry->d_seq, seq)) {
2280                                 cpu_relax();
2281                                 goto seqretry;
2282                         }
2283                         if (parent->d_op->d_compare(dentry,
2284                                                     tlen, tname, name) != 0)
2285                                 continue;
2286                 } else {
2287                         if (dentry->d_name.hash_len != hashlen)
2288                                 continue;
2289                         if (dentry_cmp(dentry, str, hashlen_len(hashlen)) != 0)
2290                                 continue;
2291                 }
2292                 *seqp = seq;
2293                 return dentry;
2294         }
2295         return NULL;
2296 }
2297
2298 /**
2299  * d_lookup - search for a dentry
2300  * @parent: parent dentry
2301  * @name: qstr of name we wish to find
2302  * Returns: dentry, or NULL
2303  *
2304  * d_lookup searches the children of the parent dentry for the name in
2305  * question. If the dentry is found its reference count is incremented and the
2306  * dentry is returned. The caller must use dput to free the entry when it has
2307  * finished using it. %NULL is returned if the dentry does not exist.
2308  */
2309 struct dentry *d_lookup(const struct dentry *parent, const struct qstr *name)
2310 {
2311         struct dentry *dentry;
2312         unsigned seq;
2313
2314         do {
2315                 seq = read_seqbegin(&rename_lock);
2316                 dentry = __d_lookup(parent, name);
2317                 if (dentry)
2318                         break;
2319         } while (read_seqretry(&rename_lock, seq));
2320         return dentry;
2321 }
2322 EXPORT_SYMBOL(d_lookup);
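
/*
 * Editorial example: d_lookup() expects a pre-hashed qstr; when
 * starting from a plain string, d_hash_and_lookup() below is usually
 * the better entry point. The manual equivalent:
 *
 *	struct qstr q = QSTR_INIT("child", 5);
 *
 *	q.hash = full_name_hash(parent, q.name, q.len);
 *	child = d_lookup(parent, &q);
 *	if (child) {
 *		...
 *		dput(child);
 *	}
 */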
2323
2324 /**
2325  * __d_lookup - search for a dentry (racy)
2326  * @parent: parent dentry
2327  * @name: qstr of name we wish to find
2328  * Returns: dentry, or NULL
2329  *
2330  * __d_lookup is like d_lookup, however it may (rarely) return a
2331  * false-negative result due to unrelated rename activity.
2332  *
2333  * __d_lookup is slightly faster by avoiding rename_lock read seqlock,
2334  * however it must be used carefully, eg. with a following d_lookup in
2335  * the case of failure.
2336  *
2337  * __d_lookup callers must be commented.
2338  */
2339 struct dentry *__d_lookup(const struct dentry *parent, const struct qstr *name)
2340 {
2341         unsigned int hash = name->hash;
2342         struct hlist_bl_head *b = d_hash(hash);
2343         struct hlist_bl_node *node;
2344         struct dentry *found = NULL;
2345         struct dentry *dentry;
2346
2347         /*
2348          * Note: There is significant duplication with __d_lookup_rcu, which is
2349          * required to prevent single-threaded performance regressions,
2350          * especially on architectures where smp_rmb() (in seqcounts) is costly.
2351          * Keep the two functions in sync.
2352          */
2353
2354         /*
2355          * The hash list is protected using RCU.
2356          *
2357          * Take d_lock when comparing a candidate dentry, to avoid races
2358          * with d_move().
2359          *
2360          * It is possible that concurrent renames can mess up our list
2361          * walk here and result in missing our dentry, resulting in the
2362          * false-negative result. d_lookup() protects against concurrent
2363          * renames using rename_lock seqlock.
2364          *
2365          * See Documentation/filesystems/path-lookup.txt for more details.
2366          */
2367         rcu_read_lock();
2368         
2369         hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
2370
2371                 if (dentry->d_name.hash != hash)
2372                         continue;
2373
2374                 spin_lock(&dentry->d_lock);
2375                 if (dentry->d_parent != parent)
2376                         goto next;
2377                 if (d_unhashed(dentry))
2378                         goto next;
2379
2380                 if (!d_same_name(dentry, parent, name))
2381                         goto next;
2382
2383                 dentry->d_lockref.count++;
2384                 found = dentry;
2385                 spin_unlock(&dentry->d_lock);
2386                 break;
2387 next:
2388                 spin_unlock(&dentry->d_lock);
2389         }
2390         rcu_read_unlock();
2391
2392         return found;
2393 }
2394
2395 /**
2396  * d_hash_and_lookup - hash the qstr then search for a dentry
2397  * @dir: Directory to search in
2398  * @name: qstr of name we wish to find
2399  *
2400  * On lookup failure NULL is returned; on a bad name, ERR_PTR(-error).
2401  */
2402 struct dentry *d_hash_and_lookup(struct dentry *dir, struct qstr *name)
2403 {
2404         /*
2405          * Check for a fs-specific hash function. Note that we must
2406          * calculate the standard hash first, as the d_op->d_hash()
2407          * routine may choose to leave the hash value unchanged.
2408          */
2409         name->hash = full_name_hash(dir, name->name, name->len);
2410         if (dir->d_flags & DCACHE_OP_HASH) {
2411                 int err = dir->d_op->d_hash(dir, name);
2412                 if (unlikely(err < 0))
2413                         return ERR_PTR(err);
2414         }
2415         return d_lookup(dir, name);
2416 }
2417 EXPORT_SYMBOL(d_hash_and_lookup);
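
/*
 * Editorial example: looking up a cached child by C string while
 * respecting any ->d_hash() the filesystem installed:
 *
 *	struct qstr q = QSTR_INIT("child", 5);
 *	struct dentry *child = d_hash_and_lookup(dir, &q);
 *
 *	if (IS_ERR_OR_NULL(child))
 *		return;
 *	...
 *	dput(child);
 */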
2418
2419 /*
2420  * When a file is deleted, we have two options:
2421  * - turn this dentry into a negative dentry
2422  * - unhash this dentry and free it.
2423  *
2424  * Usually, we want to just turn this into
2425  * a negative dentry, but if anybody else is
2426  * currently using the dentry or the inode
2427  * we can't do that and we fall back on removing
2428  * it from the hash queues and waiting for
2429  * it to be deleted later when it has no users
2430  */
2431  
2432 /**
2433  * d_delete - delete a dentry
2434  * @dentry: The dentry to delete
2435  *
2436  * Turn the dentry into a negative dentry if possible, otherwise
2437  * remove it from the hash queues so it can be deleted later
2438  */
2439  
2440 void d_delete(struct dentry * dentry)
2441 {
2442         struct inode *inode = dentry->d_inode;
2443
2444         spin_lock(&inode->i_lock);
2445         spin_lock(&dentry->d_lock);
2446         /*
2447          * Are we the only user?
2448          */
2449         if (dentry->d_lockref.count == 1) {
2450                 dentry->d_flags &= ~DCACHE_CANT_MOUNT;
2451                 dentry_unlink_inode(dentry);
2452         } else {
2453                 __d_drop(dentry);
2454                 spin_unlock(&dentry->d_lock);
2455                 spin_unlock(&inode->i_lock);
2456         }
2457 }
2458 EXPORT_SYMBOL(d_delete);
2459
2460 static void __d_rehash(struct dentry *entry)
2461 {
2462         struct hlist_bl_head *b = d_hash(entry->d_name.hash);
2463
2464         hlist_bl_lock(b);
2465         hlist_bl_add_head_rcu(&entry->d_hash, b);
2466         hlist_bl_unlock(b);
2467 }
2468
2469 /**
2470  * d_rehash     - add an entry back to the hash
2471  * @entry: dentry to add to the hash
2472  *
2473  * Adds a dentry to the hash according to its name.
2474  */
2475  
2476 void d_rehash(struct dentry * entry)
2477 {
2478         spin_lock(&entry->d_lock);
2479         __d_rehash(entry);
2480         spin_unlock(&entry->d_lock);
2481 }
2482 EXPORT_SYMBOL(d_rehash);
2483
2484 static inline unsigned start_dir_add(struct inode *dir)
2485 {
2486
2487         for (;;) {
2488                 unsigned n = dir->i_dir_seq;
2489                 if (!(n & 1) && cmpxchg(&dir->i_dir_seq, n, n + 1) == n)
2490                         return n;
2491                 cpu_relax();
2492         }
2493 }
2494
2495 static inline void end_dir_add(struct inode *dir, unsigned n)
2496 {
2497         smp_store_release(&dir->i_dir_seq, n + 2);
2498 }
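
/*
 * Editorial note: i_dir_seq acts as a hand-rolled seqcount for
 * directory additions. start_dir_add() spins until it can bump the
 * counter from an even value to an odd one (an add is in flight);
 * end_dir_add() stores back the next even value. d_alloc_parallel()
 * below rereads i_dir_seq and retries whenever it sees an odd value
 * or a change, so it never misses a concurrent add.
 */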
2499
2500 static void d_wait_lookup(struct dentry *dentry)
2501 {
2502         if (d_in_lookup(dentry)) {
2503                 DECLARE_WAITQUEUE(wait, current);
2504                 add_wait_queue(dentry->d_wait, &wait);
2505                 do {
2506                         set_current_state(TASK_UNINTERRUPTIBLE);
2507                         spin_unlock(&dentry->d_lock);
2508                         schedule();
2509                         spin_lock(&dentry->d_lock);
2510                 } while (d_in_lookup(dentry));
2511         }
2512 }
2513
2514 struct dentry *d_alloc_parallel(struct dentry *parent,
2515                                 const struct qstr *name,
2516                                 wait_queue_head_t *wq)
2517 {
2518         unsigned int hash = name->hash;
2519         struct hlist_bl_head *b = in_lookup_hash(parent, hash);
2520         struct hlist_bl_node *node;
2521         struct dentry *new = d_alloc(parent, name);
2522         struct dentry *dentry;
2523         unsigned seq, r_seq, d_seq;
2524
2525         if (unlikely(!new))
2526                 return ERR_PTR(-ENOMEM);
2527
2528 retry:
2529         rcu_read_lock();
2530         seq = smp_load_acquire(&parent->d_inode->i_dir_seq);
2531         r_seq = read_seqbegin(&rename_lock);
2532         dentry = __d_lookup_rcu(parent, name, &d_seq);
2533         if (unlikely(dentry)) {
2534                 if (!lockref_get_not_dead(&dentry->d_lockref)) {
2535                         rcu_read_unlock();
2536                         goto retry;
2537                 }
2538                 if (read_seqcount_retry(&dentry->d_seq, d_seq)) {
2539                         rcu_read_unlock();
2540                         dput(dentry);
2541                         goto retry;
2542                 }
2543                 rcu_read_unlock();
2544                 dput(new);
2545                 return dentry;
2546         }
2547         if (unlikely(read_seqretry(&rename_lock, r_seq))) {
2548                 rcu_read_unlock();
2549                 goto retry;
2550         }
2551
2552         if (unlikely(seq & 1)) {
2553                 rcu_read_unlock();
2554                 goto retry;
2555         }
2556
2557         hlist_bl_lock(b);
2558         if (unlikely(READ_ONCE(parent->d_inode->i_dir_seq) != seq)) {
2559                 hlist_bl_unlock(b);
2560                 rcu_read_unlock();
2561                 goto retry;
2562         }
2563         /*
2564          * No changes for the parent since the beginning of d_lookup().
2565          * Since all removals from the chain happen with hlist_bl_lock(),
2566          * any potential in-lookup matches are going to stay here until
2567          * we unlock the chain.  All fields are stable in everything
2568          * we encounter.
2569          */
2570         hlist_bl_for_each_entry(dentry, node, b, d_u.d_in_lookup_hash) {
2571                 if (dentry->d_name.hash != hash)
2572                         continue;
2573                 if (dentry->d_parent != parent)
2574                         continue;
2575                 if (!d_same_name(dentry, parent, name))
2576                         continue;
2577                 hlist_bl_unlock(b);
2578                 /* now we can try to grab a reference */
2579                 if (!lockref_get_not_dead(&dentry->d_lockref)) {
2580                         rcu_read_unlock();
2581                         goto retry;
2582                 }
2583
2584                 rcu_read_unlock();
2585                 /*
2586                  * somebody is likely to be still doing lookup for it;
2587                  * wait for them to finish
2588                  */
2589                 spin_lock(&dentry->d_lock);
2590                 d_wait_lookup(dentry);
2591                 /*
2592                  * it's not in-lookup anymore; in principle we should repeat
2593                  * everything from dcache lookup, but it's likely to be what
2594                  * d_lookup() would've found anyway.  If it is, just return it;
2595                  * otherwise we really have to repeat the whole thing.
2596                  */
2597                 if (unlikely(dentry->d_name.hash != hash))
2598                         goto mismatch;
2599                 if (unlikely(dentry->d_parent != parent))
2600                         goto mismatch;
2601                 if (unlikely(d_unhashed(dentry)))
2602                         goto mismatch;
2603                 if (unlikely(!d_same_name(dentry, parent, name)))
2604                         goto mismatch;
2605                 /* OK, it *is* a hashed match; return it */
2606                 spin_unlock(&dentry->d_lock);
2607                 dput(new);
2608                 return dentry;
2609         }
2610         rcu_read_unlock();
2611         /* we can't take ->d_lock here; it's OK, though. */
2612         new->d_flags |= DCACHE_PAR_LOOKUP;
2613         new->d_wait = wq;
2614         hlist_bl_add_head_rcu(&new->d_u.d_in_lookup_hash, b);
2615         hlist_bl_unlock(b);
2616         return new;
2617 mismatch:
2618         spin_unlock(&dentry->d_lock);
2619         dput(dentry);
2620         goto retry;
2621 }
2622 EXPORT_SYMBOL(d_alloc_parallel);
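
/*
 * Editorial example: the shape of a slow-path lookup built on the
 * in-lookup machinery, modelled on the VFS's own __lookup_slow()
 * (dir being parent->d_inode). If the returned dentry is still
 * in-lookup we own the lookup and must end it with d_lookup_done()
 * once ->lookup() has run:
 *
 *	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
 *	struct dentry *dentry, *old;
 *
 *	dentry = d_alloc_parallel(parent, name, &wq);
 *	if (IS_ERR(dentry) || !d_in_lookup(dentry))
 *		return dentry;
 *	old = dir->i_op->lookup(dir, dentry, flags);
 *	d_lookup_done(dentry);
 *	if (old) {
 *		dput(dentry);
 *		dentry = old;
 *	}
 *	return dentry;
 */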
2623
2624 void __d_lookup_done(struct dentry *dentry)
2625 {
2626         struct hlist_bl_head *b = in_lookup_hash(dentry->d_parent,
2627                                                  dentry->d_name.hash);
2628         hlist_bl_lock(b);
2629         dentry->d_flags &= ~DCACHE_PAR_LOOKUP;
2630         __hlist_bl_del(&dentry->d_u.d_in_lookup_hash);
2631         wake_up_all(dentry->d_wait);
2632         dentry->d_wait = NULL;
2633         hlist_bl_unlock(b);
2634         INIT_HLIST_NODE(&dentry->d_u.d_alias);
2635         INIT_LIST_HEAD(&dentry->d_lru);
2636 }
2637 EXPORT_SYMBOL(__d_lookup_done);
2638
2639 /* inode->i_lock held if inode is non-NULL */
2640
2641 static inline void __d_add(struct dentry *dentry, struct inode *inode)
2642 {
2643         struct inode *dir = NULL;
2644         unsigned n;
2645         spin_lock(&dentry->d_lock);
2646         if (unlikely(d_in_lookup(dentry))) {
2647                 dir = dentry->d_parent->d_inode;
2648                 n = start_dir_add(dir);
2649                 __d_lookup_done(dentry);
2650         }
2651         if (inode) {
2652                 unsigned add_flags = d_flags_for_inode(inode);
2653                 hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
2654                 raw_write_seqcount_begin(&dentry->d_seq);
2655                 __d_set_inode_and_type(dentry, inode, add_flags);
2656                 raw_write_seqcount_end(&dentry->d_seq);
2657                 fsnotify_update_flags(dentry);
2658         }
2659         __d_rehash(dentry);
2660         if (dir)
2661                 end_dir_add(dir, n);
2662         spin_unlock(&dentry->d_lock);
2663         if (inode)
2664                 spin_unlock(&inode->i_lock);
2665 }
2666
2667 /**
2668  * d_add - add dentry to hash queues
2669  * @entry: dentry to add
2670  * @inode: The inode to attach to this dentry
2671  *
2672  * This adds the entry to the hash queues and attaches @inode to it.
2673  * The name and parent were already filled in during d_alloc().
2674  */
2675
2676 void d_add(struct dentry *entry, struct inode *inode)
2677 {
2678         if (inode) {
2679                 security_d_instantiate(entry, inode);
2680                 spin_lock(&inode->i_lock);
2681         }
2682         __d_add(entry, inode);
2683 }
2684 EXPORT_SYMBOL(d_add);
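
/*
 * Editorial example: a simple ->lookup() that caches both hits and
 * misses; a NULL inode from the (hypothetical) examplefs_iget() makes
 * d_add() create a negative dentry. Exportable filesystems should
 * prefer d_splice_alias(), below:
 *
 *	inode = examplefs_iget(dir->i_sb, ino);
 *	d_add(dentry, inode);
 *	return NULL;
 */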
2685
2686 /**
2687  * d_exact_alias - find and hash an exact unhashed alias
2688  * @entry: dentry to add
2689  * @inode: The inode to go with this dentry
2690  *
2691  * If an unhashed dentry with the same name/parent and desired
2692  * inode already exists, hash and return it.  Otherwise, return
2693  * NULL.
2694  *
2695  * Parent directory should be locked.
2696  */
2697 struct dentry *d_exact_alias(struct dentry *entry, struct inode *inode)
2698 {
2699         struct dentry *alias;
2700         unsigned int hash = entry->d_name.hash;
2701
2702         spin_lock(&inode->i_lock);
2703         hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
2704                 /*
2705                  * Don't need alias->d_lock here, because aliases with
2706                  * d_parent == entry->d_parent are not subject to name or
2707                  * parent changes, because the parent inode i_mutex is held.
2708                  */
2709                 if (alias->d_name.hash != hash)
2710                         continue;
2711                 if (alias->d_parent != entry->d_parent)
2712                         continue;
2713                 if (!d_same_name(alias, entry->d_parent, &entry->d_name))
2714                         continue;
2715                 spin_lock(&alias->d_lock);
2716                 if (!d_unhashed(alias)) {
2717                         spin_unlock(&alias->d_lock);
2718                         alias = NULL;
2719                 } else {
2720                         __dget_dlock(alias);
2721                         __d_rehash(alias);
2722                         spin_unlock(&alias->d_lock);
2723                 }
2724                 spin_unlock(&inode->i_lock);
2725                 return alias;
2726         }
2727         spin_unlock(&inode->i_lock);
2728         return NULL;
2729 }
2730 EXPORT_SYMBOL(d_exact_alias);
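
/*
 * Editorial example (a sketch; reference counting as implied by the
 * code above): a readdir-driven dcache prefill. If an unhashed alias
 * already exists it comes back rehashed with its own reference, so
 * the inode reference we were holding for d_add() is surplus:
 *
 *	alias = d_exact_alias(dentry, inode);
 *	if (alias) {
 *		iput(inode);
 *		return alias;
 *	}
 *	d_add(dentry, inode);
 *	return dentry;
 */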
2731
2732 static void swap_names(struct dentry *dentry, struct dentry *target)
2733 {
2734         if (unlikely(dname_external(target))) {
2735                 if (unlikely(dname_external(dentry))) {
2736                         /*
2737                          * Both external: swap the pointers
2738                          */
2739                         swap(target->d_name.name, dentry->d_name.name);
2740                 } else {
2741                         /*
2742                          * dentry:internal, target:external.  Steal target's
2743                          * storage and make target internal.
2744                          */
2745                         memcpy(target->d_iname, dentry->d_name.name,
2746                                         dentry->d_name.len + 1);
2747                         dentry->d_name.name = target->d_name.name;
2748                         target->d_name.name = target->d_iname;
2749                 }
2750         } else {
2751                 if (unlikely(dname_external(dentry))) {
2752                         /*
2753                          * dentry:external, target:internal.  Give dentry's
2754                          * storage to target and make dentry internal
2755                          */
2756                         memcpy(dentry->d_iname, target->d_name.name,
2757                                         target->d_name.len + 1);
2758                         target->d_name.name = dentry->d_name.name;
2759                         dentry->d_name.name = dentry->d_iname;
2760                 } else {
2761                         /*
2762                          * Both are internal.
2763                          */
2764                         unsigned int i;
2765                         BUILD_BUG_ON(!IS_ALIGNED(DNAME_INLINE_LEN, sizeof(long)));
2766                         for (i = 0; i < DNAME_INLINE_LEN / sizeof(long); i++) {
2767                                 swap(((long *) &dentry->d_iname)[i],
2768                                      ((long *) &target->d_iname)[i]);
2769                         }
2770                 }
2771         }
2772         swap(dentry->d_name.hash_len, target->d_name.hash_len);
2773 }
2774
2775 static void copy_name(struct dentry *dentry, struct dentry *target)
2776 {
2777         struct external_name *old_name = NULL;
2778         if (unlikely(dname_external(dentry)))
2779                 old_name = external_name(dentry);
2780         if (unlikely(dname_external(target))) {
2781                 atomic_inc(&external_name(target)->u.count);
2782                 dentry->d_name = target->d_name;
2783         } else {
2784                 memcpy(dentry->d_iname, target->d_name.name,
2785                                 target->d_name.len + 1);
2786                 dentry->d_name.name = dentry->d_iname;
2787                 dentry->d_name.hash_len = target->d_name.hash_len;
2788         }
2789         if (old_name && likely(atomic_dec_and_test(&old_name->u.count)))
2790                 kfree_rcu(old_name, u.head);
2791 }
2792
2793 /*
2794  * __d_move - move a dentry
2795  * @dentry: entry to move
2796  * @target: new dentry
2797  * @exchange: exchange the two dentries
2798  *
2799  * Update the dcache to reflect the move of a file name. Negative
2800  * dcache entries should not be moved in this way. Caller must hold
2801  * rename_lock, the i_mutex of the source and target directories,
2802  * and the sb->s_vfs_rename_mutex if they differ. See lock_rename().
2803  */
2804 static void __d_move(struct dentry *dentry, struct dentry *target,
2805                      bool exchange)
2806 {
2807         struct dentry *old_parent, *p;
2808         struct inode *dir = NULL;
2809         unsigned n;
2810
2811         WARN_ON(!dentry->d_inode);
2812         if (WARN_ON(dentry == target))
2813                 return;
2814
2815         BUG_ON(d_ancestor(target, dentry));
2816         old_parent = dentry->d_parent;
2817         p = d_ancestor(old_parent, target);
2818         if (IS_ROOT(dentry)) {
2819                 BUG_ON(p);
2820                 spin_lock(&target->d_parent->d_lock);
2821         } else if (!p) {
2822                 /* target is not a descendent of dentry->d_parent */
2823                 spin_lock(&target->d_parent->d_lock);
2824                 spin_lock_nested(&old_parent->d_lock, DENTRY_D_LOCK_NESTED);
2825         } else {
2826                 BUG_ON(p == dentry);
2827                 spin_lock(&old_parent->d_lock);
2828                 if (p != target)
2829                         spin_lock_nested(&target->d_parent->d_lock,
2830                                         DENTRY_D_LOCK_NESTED);
2831         }
2832         spin_lock_nested(&dentry->d_lock, 2);
2833         spin_lock_nested(&target->d_lock, 3);
2834
2835         if (unlikely(d_in_lookup(target))) {
2836                 dir = target->d_parent->d_inode;
2837                 n = start_dir_add(dir);
2838                 __d_lookup_done(target);
2839         }
2840
2841         write_seqcount_begin(&dentry->d_seq);
2842         write_seqcount_begin_nested(&target->d_seq, DENTRY_D_LOCK_NESTED);
2843
2844         /* unhash both */
2845         if (!d_unhashed(dentry))
2846                 ___d_drop(dentry);
2847         if (!d_unhashed(target))
2848                 ___d_drop(target);
2849
2850         /* ... and switch them in the tree */
2851         dentry->d_parent = target->d_parent;
2852         if (!exchange) {
2853                 copy_name(dentry, target);
2854                 target->d_hash.pprev = NULL;
2855                 dentry->d_parent->d_lockref.count++;
2856                 if (dentry != old_parent) /* wasn't IS_ROOT */
2857                         WARN_ON(!--old_parent->d_lockref.count);
2858         } else {
2859                 target->d_parent = old_parent;
2860                 swap_names(dentry, target);
2861                 list_move(&target->d_child, &target->d_parent->d_subdirs);
2862                 __d_rehash(target);
2863                 fsnotify_update_flags(target);
2864         }
2865         list_move(&dentry->d_child, &dentry->d_parent->d_subdirs);
2866         __d_rehash(dentry);
2867         fsnotify_update_flags(dentry);
2868         fscrypt_handle_d_move(dentry);
2869
2870         write_seqcount_end(&target->d_seq);
2871         write_seqcount_end(&dentry->d_seq);
2872
2873         if (dir)
2874                 end_dir_add(dir, n);
2875
2876         if (dentry->d_parent != old_parent)
2877                 spin_unlock(&dentry->d_parent->d_lock);
2878         if (dentry != old_parent)
2879                 spin_unlock(&old_parent->d_lock);
2880         spin_unlock(&target->d_lock);
2881         spin_unlock(&dentry->d_lock);
2882 }
2883
2884 /*
2885  * d_move - move a dentry
2886  * @dentry: entry to move
2887  * @target: new dentry
2888  *
2889  * Update the dcache to reflect the move of a file name. Negative
2890  * dcache entries should not be moved in this way. See the locking
2891  * requirements for __d_move.
2892  */
2893 void d_move(struct dentry *dentry, struct dentry *target)
2894 {
2895         write_seqlock(&rename_lock);
2896         __d_move(dentry, target, false);
2897         write_sequnlock(&rename_lock);
2898 }
2899 EXPORT_SYMBOL(d_move);
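
/*
 * Editorial note: filesystems rarely call d_move() themselves. After a
 * successful ->rename(), vfs_rename() does
 *
 *	d_move(old_dentry, new_dentry);
 *
 * (or d_exchange() for RENAME_EXCHANGE), unless the filesystem sets
 * FS_RENAME_DOES_D_MOVE and takes care of it internally.
 */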
2900
2901 /*
2902  * d_exchange - exchange two dentries
2903  * @dentry1: first dentry
2904  * @dentry2: second dentry
2905  */
2906 void d_exchange(struct dentry *dentry1, struct dentry *dentry2)
2907 {
2908         write_seqlock(&rename_lock);
2909
2910         WARN_ON(!dentry1->d_inode);
2911         WARN_ON(!dentry2->d_inode);
2912         WARN_ON(IS_ROOT(dentry1));
2913         WARN_ON(IS_ROOT(dentry2));
2914
2915         __d_move(dentry1, dentry2, true);
2916
2917         write_sequnlock(&rename_lock);
2918 }
2919
2920 /**
2921  * d_ancestor - search for an ancestor
2922  * @p1: ancestor dentry
2923  * @p2: child dentry
2924  *
2925  * Returns the ancestor dentry of p2 which is a child of p1, if p1 is
2926  * an ancestor of p2, else NULL.
2927  */
2928 struct dentry *d_ancestor(struct dentry *p1, struct dentry *p2)
2929 {
2930         struct dentry *p;
2931
2932         for (p = p2; !IS_ROOT(p); p = p->d_parent) {
2933                 if (p->d_parent == p1)
2934                         return p;
2935         }
2936         return NULL;
2937 }
2938
2939 /*
2940  * This helper attempts to cope with remotely renamed directories
2941  *
2942  * It assumes that the caller is already holding
2943  * dentry->d_parent->d_inode->i_mutex, and rename_lock
2944  *
2945  * Note: If ever the locking in lock_rename() changes, then please
2946  * remember to update this too...
2947  */
2948 static int __d_unalias(struct inode *inode,
2949                 struct dentry *dentry, struct dentry *alias)
2950 {
2951         struct mutex *m1 = NULL;
2952         struct rw_semaphore *m2 = NULL;
2953         int ret = -ESTALE;
2954
2955         /* If alias and dentry share a parent, then no extra locks required */
2956         if (alias->d_parent == dentry->d_parent)
2957                 goto out_unalias;
2958
2959         /* See lock_rename() */
2960         if (!mutex_trylock(&dentry->d_sb->s_vfs_rename_mutex))
2961                 goto out_err;
2962         m1 = &dentry->d_sb->s_vfs_rename_mutex;
2963         if (!inode_trylock_shared(alias->d_parent->d_inode))
2964                 goto out_err;
2965         m2 = &alias->d_parent->d_inode->i_rwsem;
2966 out_unalias:
2967         __d_move(alias, dentry, false);
2968         ret = 0;
2969 out_err:
2970         if (m2)
2971                 up_read(m2);
2972         if (m1)
2973                 mutex_unlock(m1);
2974         return ret;
2975 }
2976
2977 /**
2978  * d_splice_alias - splice a disconnected dentry into the tree if one exists
2979  * @inode:  the inode which may have a disconnected dentry
2980  * @dentry: a negative dentry which we want to point to the inode.
2981  *
2982  * If inode is a directory and has an IS_ROOT alias, then d_move that in
2983  * place of the given dentry and return it, else simply d_add the inode
2984  * to the dentry and return NULL.
2985  *
2986  * If a non-IS_ROOT directory is found, the filesystem is corrupt, and
2987  * we should error out: directories can't have multiple aliases.
2988  *
2989  * This is needed in the lookup routine of any filesystem that is exportable
2990  * (via knfsd) so that we can build dcache paths to directories effectively.
2991  *
2992  * If a dentry was found and moved, then it is returned.  Otherwise NULL
2993  * is returned.  This matches the expected return value of ->lookup.
2994  *
2995  * Cluster filesystems may call this function with a negative, hashed dentry.
2996  * In that case, we know that the inode will be a regular file, and that this
2997  * will only occur during atomic_open.  So the only case in which the dentry
2998  * may already be hashed is that final one.
2999  */
3000 struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
3001 {
3002         if (IS_ERR(inode))
3003                 return ERR_CAST(inode);
3004
3005         BUG_ON(!d_unhashed(dentry));
3006
3007         if (!inode)
3008                 goto out;
3009
3010         security_d_instantiate(dentry, inode);
3011         spin_lock(&inode->i_lock);
3012         if (S_ISDIR(inode->i_mode)) {
3013                 struct dentry *new = __d_find_any_alias(inode);
3014                 if (unlikely(new)) {
3015                         /* The reference to new ensures it remains an alias */
3016                         spin_unlock(&inode->i_lock);
3017                         write_seqlock(&rename_lock);
3018                         if (unlikely(d_ancestor(new, dentry))) {
3019                                 write_sequnlock(&rename_lock);
3020                                 dput(new);
3021                                 new = ERR_PTR(-ELOOP);
3022                                 pr_warn_ratelimited(
3023                                         "VFS: Lookup of '%s' in %s %s"
3024                                         " would have caused loop\n",
3025                                         dentry->d_name.name,
3026                                         inode->i_sb->s_type->name,
3027                                         inode->i_sb->s_id);
3028                         } else if (!IS_ROOT(new)) {
3029                                 struct dentry *old_parent = dget(new->d_parent);
3030                                 int err = __d_unalias(inode, dentry, new);
3031                                 write_sequnlock(&rename_lock);
3032                                 if (err) {
3033                                         dput(new);
3034                                         new = ERR_PTR(err);
3035                                 }
3036                                 dput(old_parent);
3037                         } else {
3038                                 __d_move(new, dentry, false);
3039                                 write_sequnlock(&rename_lock);
3040                         }
3041                         iput(inode);
3042                         return new;
3043                 }
3044         }
3045 out:
3046         __d_add(dentry, inode);
3047         return NULL;
3048 }
3049 EXPORT_SYMBOL(d_splice_alias);
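/*
 * A typical ->lookup() in an exportable on-disk filesystem ends by
 * passing whatever it found (an inode, NULL, or an ERR_PTR) straight
 * to d_splice_alias().  A minimal sketch for a hypothetical filesystem
 * "foo"; foo_inode_by_name(), foo_iget() and FOO_NAME_MAX are made-up
 * stand-ins, not kernel API:
 */
static struct dentry *foo_lookup(struct inode *dir, struct dentry *dentry,
				 unsigned int flags)
{
	struct inode *inode = NULL;
	ino_t ino;

	if (dentry->d_name.len > FOO_NAME_MAX)
		return ERR_PTR(-ENAMETOOLONG);

	ino = foo_inode_by_name(dir, &dentry->d_name);
	if (ino)
		inode = foo_iget(dir->i_sb, ino);
	/* handles NULL, ERR_PTR() and directory aliases in one place */
	return d_splice_alias(inode, dentry);
}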
3050
3051 /*
3052  * Test whether new_dentry is a subdirectory of old_dentry.
3053  *
3054  * Trivially implemented using the dcache structure
3055  */
3056
3057 /**
3058  * is_subdir - is new dentry a subdirectory of old_dentry
3059  * @new_dentry: new dentry
3060  * @old_dentry: old dentry
3061  *
3062  * Returns true if new_dentry is a subdirectory of old_dentry (at any depth),
3063  * false otherwise.
3064  * The caller must ensure that "new_dentry" is pinned before calling is_subdir().
3065  */
3066
3067 bool is_subdir(struct dentry *new_dentry, struct dentry *old_dentry)
3068 {
3069         bool result;
3070         unsigned seq;
3071
3072         if (new_dentry == old_dentry)
3073                 return true;
3074
3075         do {
3076                 /* restart point in case a concurrent rename bumps the seqcount */
3077                 seq = read_seqbegin(&rename_lock);
3078                 /*
3079                  * Need rcu_read_lock() to protect against d_parent being
3080                  * changed under us by a concurrent d_move()
3081                  */
3082                 rcu_read_lock();
3083                 if (d_ancestor(old_dentry, new_dentry))
3084                         result = true;
3085                 else
3086                         result = false;
3087                 rcu_read_unlock();
3088         } while (read_seqretry(&rename_lock, seq));
3089
3090         return result;
3091 }
3092 EXPORT_SYMBOL(is_subdir);
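/*
 * The seqlock loop lets the ancestry walk run locklessly: the walk is
 * simply repeated if a rename was in flight while it ran.  A sketch of
 * a caller re-validating containment ("victim" and "watched_root" are
 * hypothetical names, not kernel API):
 *
 *	if (!is_subdir(victim, watched_root))
 *		return -EXDEV;
 */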
3093
3094 static enum d_walk_ret d_genocide_kill(void *data, struct dentry *dentry)
3095 {
3096         struct dentry *root = data;
3097         if (dentry != root) {
3098                 if (d_unhashed(dentry) || !dentry->d_inode)
3099                         return D_WALK_SKIP;
3100
3101                 if (!(dentry->d_flags & DCACHE_GENOCIDE)) {
3102                         dentry->d_flags |= DCACHE_GENOCIDE;
3103                         dentry->d_lockref.count--;
3104                 }
3105         }
3106         return D_WALK_CONTINUE;
3107 }
3108
3109 void d_genocide(struct dentry *parent)
3110 {
3111         d_walk(parent, parent, d_genocide_kill);
3112 }
3113 EXPORT_SYMBOL(d_genocide);
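/*
 * d_genocide() drops one reference from every hashed, positive dentry
 * below @parent; the DCACHE_GENOCIDE flag guarantees each dentry is
 * decremented at most once even if d_walk() has to restart.  The
 * canonical user is kill_litter_super() in fs/super.c, which undoes
 * the pinning references a simple in-RAM filesystem took on its tree:
 *
 *	if (sb->s_root)
 *		d_genocide(sb->s_root);
 *	kill_anon_super(sb);
 */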
3115
3116 void d_tmpfile(struct dentry *dentry, struct inode *inode)
3117 {
3118         inode_dec_link_count(inode);
3119         BUG_ON(dentry->d_name.name != dentry->d_iname ||
3120                 !hlist_unhashed(&dentry->d_u.d_alias) ||
3121                 !d_unlinked(dentry));
3122         spin_lock(&dentry->d_parent->d_lock);
3123         spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
3124         dentry->d_name.len = sprintf(dentry->d_iname, "#%llu",
3125                                 (unsigned long long)inode->i_ino);
3126         spin_unlock(&dentry->d_lock);
3127         spin_unlock(&dentry->d_parent->d_lock);
3128         d_instantiate(dentry, inode);
3129 }
3130 EXPORT_SYMBOL(d_tmpfile);
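/*
 * A filesystem's ->tmpfile() is expected to allocate an inode whose
 * link count is 1 and then hand it here; d_tmpfile() drops that count
 * to 0 and names the still-unhashed dentry "#<ino>" before
 * instantiating it.  A minimal sketch, assuming a hypothetical
 * foo_new_inode() helper that returns such an inode or an ERR_PTR:
 */
static int foo_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	struct inode *inode = foo_new_inode(dir, mode);	/* i_nlink == 1 */

	if (IS_ERR(inode))
		return PTR_ERR(inode);
	d_tmpfile(dentry, inode);	/* i_nlink -> 0, name "#<ino>" */
	return 0;
}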
3131
3132 static unsigned long dhash_entries __initdata;
3133 static int __init set_dhash_entries(char *str)
3134 {
3135         if (!str)
3136                 return 0;
3137         dhash_entries = simple_strtoul(str, &str, 0);
3138         return 1;
3139 }
3140 __setup("dhash_entries=", set_dhash_entries);
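/*
 * For example, booting with "dhash_entries=1048576" on the kernel
 * command line requests roughly 2^20 hash buckets;
 * alloc_large_system_hash() rounds the request to a power of two and
 * caps it against available memory.
 */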
3141
3142 static void __init dcache_init_early(void)
3143 {
3144         /* If hashes are distributed across NUMA nodes, defer
3145          * hash allocation until vmalloc space is available.
3146          */
3147         if (hashdist)
3148                 return;
3149
3150         dentry_hashtable =
3151                 alloc_large_system_hash("Dentry cache",
3152                                         sizeof(struct hlist_bl_head),
3153                                         dhash_entries,
3154                                         13,
3155                                         HASH_EARLY | HASH_ZERO,
3156                                         &d_hash_shift,
3157                                         NULL,
3158                                         0,
3159                                         0);
3160         d_hash_shift = 32 - d_hash_shift;
3161 }
3162
3163 static void __init dcache_init(void)
3164 {
3165         /*
3166          * A constructor could be added for stable state like the lists,
3167          * but it is probably not worth it because of the cache nature
3168          * of the dcache.
3169          */
3170         dentry_cache = KMEM_CACHE_USERCOPY(dentry,
3171                 SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD|SLAB_ACCOUNT,
3172                 d_iname);
3173
3174         /* Hash may have been set up in dcache_init_early */
3175         if (!hashdist)
3176                 return;
3177
3178         dentry_hashtable =
3179                 alloc_large_system_hash("Dentry cache",
3180                                         sizeof(struct hlist_bl_head),
3181                                         dhash_entries,
3182                                         13,
3183                                         HASH_ZERO,
3184                                         &d_hash_shift,
3185                                         NULL,
3186                                         0,
3187                                         0);
3188         d_hash_shift = 32 - d_hash_shift;
3189 }
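/*
 * In both init paths alloc_large_system_hash() returns log2 of the
 * bucket count through d_hash_shift; flipping it to 32 - log2(buckets)
 * lets d_hash() pick a bucket with a single right shift that keeps the
 * high bits of the 32-bit name hash.  Worked example with 2^20
 * buckets:
 *
 *	d_hash_shift == 20 after allocation
 *	d_hash_shift = 32 - 20 == 12
 *	bucket = dentry_hashtable + (hash >> 12)   (top 20 bits of hash)
 */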
3190
3191 /* SLAB cache for __getname() consumers */
3192 struct kmem_cache *names_cachep __read_mostly;
3193 EXPORT_SYMBOL(names_cachep);
3194
3195 void __init vfs_caches_init_early(void)
3196 {
3197         int i;
3198
3199         for (i = 0; i < ARRAY_SIZE(in_lookup_hashtable); i++)
3200                 INIT_HLIST_BL_HEAD(&in_lookup_hashtable[i]);
3201
3202         dcache_init_early();
3203         inode_init_early();
3204 }
3205
3206 void __init vfs_caches_init(void)
3207 {
3208         names_cachep = kmem_cache_create_usercopy("names_cache", PATH_MAX, 0,
3209                         SLAB_HWCACHE_ALIGN|SLAB_PANIC, 0, PATH_MAX, NULL);
3210
3211         dcache_init();
3212         inode_init();
3213         files_init();
3214         files_maxfiles_init();
3215         mnt_init();
3216         bdev_cache_init();
3217         chrdev_init();
3218 }