        return LRU_SKIP;
}
+/*
+ * Unhash items on @dispose immediately, then queue them on the
+ * disposal workqueue to finish releasing them in the background.
+ *
+ * cel: Note that between the time list_lru_shrink_walk runs and
+ * now, these items are in the hash table but marked unhashed.
+ * Why release these outside of lru_cb? There's no lock ordering
+ * problem since lru_cb currently takes no lock.
+ */
+static void nfsd_file_gc_dispose_list(struct list_head *dispose)
+{
+       struct nfsd_file *nf;
+
+       list_for_each_entry(nf, dispose, nf_lru) {
+               spin_lock(&nfsd_file_hashtbl[nf->nf_hashval].nfb_lock);
+               nfsd_file_do_unhash(nf);
+               spin_unlock(&nfsd_file_hashtbl[nf->nf_hashval].nfb_lock);
+       }
+       nfsd_file_dispose_list_delayed(dispose);
+}
+
static unsigned long
nfsd_file_lru_walk_list(struct shrink_control *sc)
{
        LIST_HEAD(head);
-       struct nfsd_file *nf;
        unsigned long ret;

        if (sc)
                ret = list_lru_shrink_walk(&nfsd_file_lru, sc,
                                nfsd_file_lru_cb, &head);
        else
                ret = list_lru_walk(&nfsd_file_lru,
                                nfsd_file_lru_cb,
                                &head, LONG_MAX);
-       list_for_each_entry(nf, &head, nf_lru) {
-               spin_lock(&nfsd_file_hashtbl[nf->nf_hashval].nfb_lock);
-               nfsd_file_do_unhash(nf);
-               spin_unlock(&nfsd_file_hashtbl[nf->nf_hashval].nfb_lock);
-       }
-       nfsd_file_dispose_list_delayed(&head);
+       nfsd_file_gc_dispose_list(&head);
        return ret;
}
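
For orientation: in the tree this patch applies to, nfsd_file_lru_walk_list() is reached from two directions. The filecache shrinker's scan callback passes its struct shrink_control, so list_lru_shrink_walk() honors sc->nr_to_scan; the filecache garbage collector passes NULL, so the whole LRU is walked with a LONG_MAX budget. The two callers look roughly like this (shown for context, not part of the diff):

static unsigned long
nfsd_file_lru_scan(struct shrinker *s, struct shrink_control *sc)
{
        return nfsd_file_lru_walk_list(sc);
}

static void
nfsd_file_gc(void)
{
        nfsd_file_lru_walk_list(NULL);
}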
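
The comment above nfsd_file_gc_dispose_list() captures a common kernel pattern: do only the cheap bookkeeping (unhashing) under a spinlock, and defer the part of the release that may sleep, such as the final fput(), to a workqueue. Below is a minimal, self-contained sketch of that pattern; all demo_* names are hypothetical, and the real nfsd_file_dispose_list_delayed() differs in detail.

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct demo_item {
        struct list_head d_lru;
        /* ... per-object state ... */
};

static LIST_HEAD(demo_disposal);        /* items awaiting release */
static DEFINE_SPINLOCK(demo_lock);      /* guards demo_disposal */

static void demo_dispose_work(struct work_struct *work)
{
        LIST_HEAD(todo);
        struct demo_item *item, *next;

        /* Detach everything pending so the lock is held only briefly. */
        spin_lock(&demo_lock);
        list_splice_init(&demo_disposal, &todo);
        spin_unlock(&demo_lock);

        list_for_each_entry_safe(item, next, &todo, d_lru) {
                list_del(&item->d_lru);
                /* The blocking part of the release goes here; it is
                 * safe to sleep because we run in process context. */
        }
}

static DECLARE_WORK(demo_work, demo_dispose_work);

/* Analogue of nfsd_file_dispose_list_delayed(): hand @dispose to the
 * workqueue instead of releasing the items on the spot. */
static void demo_dispose_list_delayed(struct list_head *dispose)
{
        spin_lock(&demo_lock);
        list_splice_tail_init(dispose, &demo_disposal);
        spin_unlock(&demo_lock);
        schedule_work(&demo_work);
}

The re-splice inside the work function means the spinlock is never held across a blocking release, which is the property the patch preserves by unhashing inline and disposing in the background.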