Merge tag 'mm-stable-2022-08-03' of git://git.kernel.org/pub/scm/linux/kernel/git...

diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
index a1941c8..2bbe791 100644
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -98,8 +98,9 @@ xfs_inode_alloc(
        ip->i_ino = ino;
        ip->i_mount = mp;
        memset(&ip->i_imap, 0, sizeof(struct xfs_imap));
-       ip->i_afp = NULL;
        ip->i_cowfp = NULL;
+       memset(&ip->i_af, 0, sizeof(ip->i_af));
+       ip->i_af.if_format = XFS_DINODE_FMT_EXTENTS;
        memset(&ip->i_df, 0, sizeof(ip->i_df));
        ip->i_flags = 0;
        ip->i_delayed_blks = 0;
@@ -111,6 +112,8 @@ xfs_inode_alloc(
        INIT_WORK(&ip->i_ioend_work, xfs_end_io);
        INIT_LIST_HEAD(&ip->i_ioend_list);
        spin_lock_init(&ip->i_ioend_lock);
+       ip->i_next_unlinked = NULLAGINO;
+       ip->i_prev_unlinked = NULLAGINO;
 
        return ip;
 }
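
With the attribute fork now embedded in struct xfs_inode as i_af instead of being
allocated on demand, "this inode has no attribute fork" can no longer be expressed
as a NULL i_afp pointer. A minimal sketch of the kind of predicate callers would
use instead, assuming the existing i_forkoff field and treating a zero fork offset
as "no attribute fork" (the helper name and body are illustrative, not taken from
this diff):

/*
 * Illustrative only: presence of the attr fork becomes a question about the
 * fork offset rather than a pointer check.
 */
static inline bool
xfs_inode_has_attr_fork(struct xfs_inode *ip)
{
        return ip->i_forkoff > 0;       /* assumption: 0 means no attr fork */
}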
@@ -130,10 +133,8 @@ xfs_inode_free_callback(
                break;
        }
 
-       if (ip->i_afp) {
-               xfs_idestroy_fork(ip->i_afp);
-               kmem_cache_free(xfs_ifork_cache, ip->i_afp);
-       }
+       xfs_ifork_zap_attr(ip);
+
        if (ip->i_cowfp) {
                xfs_idestroy_fork(ip->i_cowfp);
                kmem_cache_free(xfs_ifork_cache, ip->i_cowfp);
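
The block that freed the separately allocated fork is replaced by a helper defined
elsewhere in the series. A plausible sketch of what xfs_ifork_zap_attr() does,
mirroring the initialization that xfs_inode_alloc() performs in the first hunk
above (treat the body as an assumption, it is not shown in this diff):

void
xfs_ifork_zap_attr(
        struct xfs_inode        *ip)
{
        /* Tear down the embedded fork contents... */
        xfs_idestroy_fork(&ip->i_af);
        /* ...and return i_af to its freshly allocated state. */
        memset(&ip->i_af, 0, sizeof(ip->i_af));
        ip->i_af.if_format = XFS_DINODE_FMT_EXTENTS;
}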
@@ -440,7 +441,7 @@ xfs_inodegc_queue_all(
        for_each_online_cpu(cpu) {
                gc = per_cpu_ptr(mp->m_inodegc, cpu);
                if (!llist_empty(&gc->list))
-                       queue_work_on(cpu, mp->m_inodegc_wq, &gc->work);
+                       mod_delayed_work_on(cpu, mp->m_inodegc_wq, &gc->work, 0);
        }
 }
 
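The reason queue_work_on() becomes mod_delayed_work_on() is that the per-cpu
gc->work member is converted from a struct work_struct to a struct delayed_work
in another file of the same series (not visible here). A self-contained sketch of
that workqueue API pattern, using made-up names (my_gc, my_worker) rather than
the XFS ones:

#include <linux/llist.h>
#include <linux/workqueue.h>

struct my_gc {
        struct llist_head       list;
        struct delayed_work     work;   /* was: struct work_struct work */
};

static void my_worker(struct work_struct *work)
{
        /* container_of() now has to go through to_delayed_work(). */
        struct my_gc *gc = container_of(to_delayed_work(work),
                                        struct my_gc, work);

        /* drain gc->list here */
}

static void my_init(struct my_gc *gc)
{
        init_llist_head(&gc->list);
        INIT_DELAYED_WORK(&gc->work, my_worker);        /* was: INIT_WORK() */
}

static void my_kick(struct workqueue_struct *wq, struct my_gc *gc, int cpu)
{
        /*
         * Delay 0 runs the work as soon as possible; if the work is already
         * pending, mod_delayed_work_on() re-arms its timer to the new delay.
         */
        mod_delayed_work_on(cpu, wq, &gc->work, 0);
}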
@@ -912,6 +913,7 @@ reclaim:
        ip->i_checked = 0;
        spin_unlock(&ip->i_flags_lock);
 
+       ASSERT(!ip->i_itemp || ip->i_itemp->ili_item.li_buf == NULL);
        xfs_iunlock(ip, XFS_ILOCK_EXCL);
 
        XFS_STATS_INC(ip->i_mount, xs_ig_reclaims);
@@ -1774,7 +1776,7 @@ xfs_check_delalloc(
        struct xfs_inode        *ip,
        int                     whichfork)
 {
-       struct xfs_ifork        *ifp = XFS_IFORK_PTR(ip, whichfork);
+       struct xfs_ifork        *ifp = xfs_ifork_ptr(ip, whichfork);
        struct xfs_bmbt_irec    got;
        struct xfs_iext_cursor  icur;
 
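The XFS_IFORK_PTR() macro becomes the function xfs_ifork_ptr() as part of the same
fork rework; its definition lives in another file. A sketch of the shape such an
accessor takes given the in-core forks referenced in this diff (i_df, i_af,
i_cowfp), with the attr-fork-presence test an assumption as before:

struct xfs_ifork *
xfs_ifork_ptr(
        struct xfs_inode        *ip,
        int                     whichfork)
{
        switch (whichfork) {
        case XFS_DATA_FORK:
                return &ip->i_df;
        case XFS_ATTR_FORK:
                if (ip->i_forkoff == 0)         /* assumption: no attr fork */
                        return NULL;
                return &ip->i_af;
        case XFS_COW_FORK:
                return ip->i_cowfp;             /* still allocated on demand */
        default:
                ASSERT(0);
                return NULL;
        }
}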
@@ -1841,8 +1843,8 @@ void
 xfs_inodegc_worker(
        struct work_struct      *work)
 {
-       struct xfs_inodegc      *gc = container_of(work, struct xfs_inodegc,
-                                                       work);
+       struct xfs_inodegc      *gc = container_of(to_delayed_work(work),
+                                               struct xfs_inodegc, work);
        struct llist_node       *node = llist_del_all(&gc->list);
        struct xfs_inode        *ip, *n;
 
@@ -1862,19 +1864,29 @@ xfs_inodegc_worker(
 }
 
 /*
- * Force all currently queued inode inactivation work to run immediately and
- * wait for the work to finish.
+ * Expedite all pending inodegc work to run immediately. This does not wait for
+ * completion of the work.
  */
 void
-xfs_inodegc_flush(
+xfs_inodegc_push(
        struct xfs_mount        *mp)
 {
        if (!xfs_is_inodegc_enabled(mp))
                return;
+       trace_xfs_inodegc_push(mp, __return_address);
+       xfs_inodegc_queue_all(mp);
+}
 
+/*
+ * Force all currently queued inode inactivation work to run immediately and
+ * wait for the work to finish.
+ */
+void
+xfs_inodegc_flush(
+       struct xfs_mount        *mp)
+{
+       xfs_inodegc_push(mp);
        trace_xfs_inodegc_flush(mp, __return_address);
-
-       xfs_inodegc_queue_all(mp);
        flush_workqueue(mp->m_inodegc_wq);
 }
 
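The split gives callers two levels of urgency: xfs_inodegc_push() expedites all
queued inactivation work and returns immediately, while xfs_inodegc_flush()
pushes and then waits in flush_workqueue(). Two illustrative callers (hypothetical
names, not part of this patch) to show the intended difference:

/* Reporting-style path: wants reasonably fresh counters, must not block. */
static void example_report_space(struct xfs_mount *mp)
{
        xfs_inodegc_push(mp);
        /* read in-core counters here; pushed work may still be running */
}

/* Quiesce-style path: nothing may remain queued before we proceed. */
static void example_quiesce(struct xfs_mount *mp)
{
        xfs_inodegc_flush(mp);
        /* all previously queued inactivation work has now completed */
}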
@@ -2014,6 +2026,7 @@ xfs_inodegc_queue(
        struct xfs_inodegc      *gc;
        int                     items;
        unsigned int            shrinker_hits;
+       unsigned long           queue_delay = 1;
 
        trace_xfs_inode_set_need_inactive(ip);
        spin_lock(&ip->i_flags_lock);
@@ -2025,19 +2038,26 @@ xfs_inodegc_queue(
        items = READ_ONCE(gc->items);
        WRITE_ONCE(gc->items, items + 1);
        shrinker_hits = READ_ONCE(gc->shrinker_hits);
-       put_cpu_ptr(gc);
 
-       if (!xfs_is_inodegc_enabled(mp))
+       /*
+        * We queue the work while holding the current CPU so that the work
+        * is scheduled to run on this CPU.
+        */
+       if (!xfs_is_inodegc_enabled(mp)) {
+               put_cpu_ptr(gc);
                return;
-
-       if (xfs_inodegc_want_queue_work(ip, items)) {
-               trace_xfs_inodegc_queue(mp, __return_address);
-               queue_work(mp->m_inodegc_wq, &gc->work);
        }
 
+       if (xfs_inodegc_want_queue_work(ip, items))
+               queue_delay = 0;
+
+       trace_xfs_inodegc_queue(mp, __return_address);
+       mod_delayed_work(mp->m_inodegc_wq, &gc->work, queue_delay);
+       put_cpu_ptr(gc);
+
        if (xfs_inodegc_want_flush_work(ip, items, shrinker_hits)) {
                trace_xfs_inodegc_throttle(mp, __return_address);
-               flush_work(&gc->work);
+               flush_delayed_work(&gc->work);
        }
 }
 
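Two things change together in this hunk: queueing now defaults to a one-jiffy
delay (queue_delay = 1) that is dropped to 0 when xfs_inodegc_want_queue_work()
says the backlog needs immediate service, and put_cpu_ptr() moves below
mod_delayed_work() so preemption stays disabled across the queueing and the work
is scheduled on the CPU whose list was just grown. A stripped-down sketch of that
pattern, reusing the hypothetical my_gc type from the earlier sketch:

static void my_queue(struct workqueue_struct *wq, struct my_gc __percpu *pcp,
                     struct llist_node *item, bool backlogged)
{
        unsigned long delay = 1;                /* default: batch for one tick */
        struct my_gc *gc = get_cpu_ptr(pcp);    /* disables preemption */

        llist_add(item, &gc->list);
        if (backlogged)
                delay = 0;                      /* run as soon as possible */

        /*
         * Still pinned to this CPU: on a per-cpu workqueue the work is
         * queued on, and will run on, the CPU that owns this gc.
         */
        mod_delayed_work(wq, &gc->work, delay);
        put_cpu_ptr(gc);                        /* re-enable preemption */
}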
@@ -2054,7 +2074,7 @@ xfs_inodegc_cpu_dead(
        unsigned int            count = 0;
 
        dead_gc = per_cpu_ptr(mp->m_inodegc, dead_cpu);
-       cancel_work_sync(&dead_gc->work);
+       cancel_delayed_work_sync(&dead_gc->work);
 
        if (llist_empty(&dead_gc->list))
                return;
@@ -2073,12 +2093,12 @@ xfs_inodegc_cpu_dead(
        llist_add_batch(first, last, &gc->list);
        count += READ_ONCE(gc->items);
        WRITE_ONCE(gc->items, count);
-       put_cpu_ptr(gc);
 
        if (xfs_is_inodegc_enabled(mp)) {
                trace_xfs_inodegc_queue(mp, __return_address);
-               queue_work(mp->m_inodegc_wq, &gc->work);
+               mod_delayed_work(mp->m_inodegc_wq, &gc->work, 0);
        }
+       put_cpu_ptr(gc);
 }
 
 /*
@@ -2173,7 +2193,7 @@ xfs_inodegc_shrinker_scan(
                        unsigned int    h = READ_ONCE(gc->shrinker_hits);
 
                        WRITE_ONCE(gc->shrinker_hits, h + 1);
-                       queue_work_on(cpu, mp->m_inodegc_wq, &gc->work);
+                       mod_delayed_work_on(cpu, mp->m_inodegc_wq, &gc->work, 0);
                        no_items = false;
                }
        }