fs/xfs/xfs_trans_rmap.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2016 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_rmap_item.h"
#include "xfs_alloc.h"
#include "xfs_rmap.h"

/* Set the map extent flags for this reverse mapping. */
static void
xfs_trans_set_rmap_flags(
	struct xfs_map_extent		*rmap,
	enum xfs_rmap_intent_type	type,
	int				whichfork,
	xfs_exntst_t			state)
{
	rmap->me_flags = 0;
	if (state == XFS_EXT_UNWRITTEN)
		rmap->me_flags |= XFS_RMAP_EXTENT_UNWRITTEN;
	if (whichfork == XFS_ATTR_FORK)
		rmap->me_flags |= XFS_RMAP_EXTENT_ATTR_FORK;
	switch (type) {
	case XFS_RMAP_MAP:
		rmap->me_flags |= XFS_RMAP_EXTENT_MAP;
		break;
	case XFS_RMAP_MAP_SHARED:
		rmap->me_flags |= XFS_RMAP_EXTENT_MAP_SHARED;
		break;
	case XFS_RMAP_UNMAP:
		rmap->me_flags |= XFS_RMAP_EXTENT_UNMAP;
		break;
	case XFS_RMAP_UNMAP_SHARED:
		rmap->me_flags |= XFS_RMAP_EXTENT_UNMAP_SHARED;
		break;
	case XFS_RMAP_CONVERT:
		rmap->me_flags |= XFS_RMAP_EXTENT_CONVERT;
		break;
	case XFS_RMAP_CONVERT_SHARED:
		rmap->me_flags |= XFS_RMAP_EXTENT_CONVERT_SHARED;
		break;
	case XFS_RMAP_ALLOC:
		rmap->me_flags |= XFS_RMAP_EXTENT_ALLOC;
		break;
	case XFS_RMAP_FREE:
		rmap->me_flags |= XFS_RMAP_EXTENT_FREE;
		break;
	default:
		ASSERT(0);
	}
}

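/*
 * Allocate and attach an RUD (rmap update done) log item to the
 * transaction, binding it to the RUI whose intended work it will retire.
 */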
struct xfs_rud_log_item *
xfs_trans_get_rud(
	struct xfs_trans		*tp,
	struct xfs_rui_log_item		*ruip)
{
	struct xfs_rud_log_item		*rudp;

	rudp = xfs_rud_init(tp->t_mountp, ruip);
	xfs_trans_add_item(tp, &rudp->rud_item);
	return rudp;
}

/*
 * Finish an rmap update and log it to the RUD. Note that the transaction is
 * marked dirty regardless of whether the rmap update succeeds or fails to
 * support the RUI/RUD lifecycle rules.
 */
int
xfs_trans_log_finish_rmap_update(
	struct xfs_trans		*tp,
	struct xfs_rud_log_item		*rudp,
	enum xfs_rmap_intent_type	type,
	uint64_t			owner,
	int				whichfork,
	xfs_fileoff_t			startoff,
	xfs_fsblock_t			startblock,
	xfs_filblks_t			blockcount,
	xfs_exntst_t			state,
	struct xfs_btree_cur		**pcur)
{
	int				error;

	error = xfs_rmap_finish_one(tp, type, owner, whichfork, startoff,
			startblock, blockcount, state, pcur);

	/*
	 * Mark the transaction dirty, even on error. This ensures the
	 * transaction is aborted, which:
	 *
	 * 1.) releases the RUI and frees the RUD
	 * 2.) shuts down the filesystem
	 */
	tp->t_flags |= XFS_TRANS_DIRTY;
	set_bit(XFS_LI_DIRTY, &rudp->rud_item.li_flags);

	return error;
}

/*
 * Sort rmap intents by AG so that the deferred updates are processed in
 * AG order, keeping per-AG locking consistent across the batch.
 */
static int
xfs_rmap_update_diff_items(
	void				*priv,
	struct list_head		*a,
	struct list_head		*b)
{
	struct xfs_mount		*mp = priv;
	struct xfs_rmap_intent		*ra;
	struct xfs_rmap_intent		*rb;

	ra = container_of(a, struct xfs_rmap_intent, ri_list);
	rb = container_of(b, struct xfs_rmap_intent, ri_list);
	return  XFS_FSB_TO_AGNO(mp, ra->ri_bmap.br_startblock) -
		XFS_FSB_TO_AGNO(mp, rb->ri_bmap.br_startblock);
}

/* Get an RUI. */
STATIC void *
xfs_rmap_update_create_intent(
	struct xfs_trans		*tp,
	unsigned int			count)
{
	struct xfs_rui_log_item		*ruip;

	ASSERT(tp != NULL);
	ASSERT(count > 0);

	ruip = xfs_rui_init(tp->t_mountp, count);
	ASSERT(ruip != NULL);

	/*
	 * Get a log_item_desc to point at the new item.
	 */
	xfs_trans_add_item(tp, &ruip->rui_item);
	return ruip;
}

/* Log rmap updates in the intent item. */
STATIC void
xfs_rmap_update_log_item(
	struct xfs_trans		*tp,
	void				*intent,
	struct list_head		*item)
{
	struct xfs_rui_log_item		*ruip = intent;
	struct xfs_rmap_intent		*rmap;
	uint				next_extent;
	struct xfs_map_extent		*map;

	rmap = container_of(item, struct xfs_rmap_intent, ri_list);

	tp->t_flags |= XFS_TRANS_DIRTY;
	set_bit(XFS_LI_DIRTY, &ruip->rui_item.li_flags);

	/*
	 * atomic_inc_return gives us the value after the increment;
	 * we want to use it as an array index so we need to subtract 1 from
	 * it.
	 */
	next_extent = atomic_inc_return(&ruip->rui_next_extent) - 1;
	ASSERT(next_extent < ruip->rui_format.rui_nextents);
	map = &ruip->rui_format.rui_extents[next_extent];
	map->me_owner = rmap->ri_owner;
	map->me_startblock = rmap->ri_bmap.br_startblock;
	map->me_startoff = rmap->ri_bmap.br_startoff;
	map->me_len = rmap->ri_bmap.br_blockcount;
	xfs_trans_set_rmap_flags(map, rmap->ri_type, rmap->ri_whichfork,
			rmap->ri_bmap.br_state);
}

/* Get an RUD so we can process all the deferred rmap updates. */
STATIC void *
xfs_rmap_update_create_done(
	struct xfs_trans		*tp,
	void				*intent,
	unsigned int			count)
{
	return xfs_trans_get_rud(tp, intent);
}

/*
 * Process a deferred rmap update.  The opaque state pointer carries the
 * rmap btree cursor from one update to the next so consecutive updates
 * can reuse it; xfs_rmap_update_finish_cleanup releases it.
 */
STATIC int
xfs_rmap_update_finish_item(
	struct xfs_trans		*tp,
	struct xfs_defer_ops		*dop,
	struct list_head		*item,
	void				*done_item,
	void				**state)
{
	struct xfs_rmap_intent		*rmap;
	int				error;

	rmap = container_of(item, struct xfs_rmap_intent, ri_list);
	error = xfs_trans_log_finish_rmap_update(tp, done_item,
			rmap->ri_type,
			rmap->ri_owner, rmap->ri_whichfork,
			rmap->ri_bmap.br_startoff,
			rmap->ri_bmap.br_startblock,
			rmap->ri_bmap.br_blockcount,
			rmap->ri_bmap.br_state,
			(struct xfs_btree_cur **)state);
	kmem_free(rmap);
	return error;
}

/* Clean up after processing deferred rmaps. */
STATIC void
xfs_rmap_update_finish_cleanup(
	struct xfs_trans	*tp,
	void			*state,
	int			error)
{
	struct xfs_btree_cur	*rcur = state;

	xfs_rmap_finish_one_cleanup(tp, rcur, error);
}

/* Abort all pending RUIs. */
STATIC void
xfs_rmap_update_abort_intent(
	void				*intent)
{
	xfs_rui_release(intent);
}

/* Cancel a deferred rmap update. */
STATIC void
xfs_rmap_update_cancel_item(
	struct list_head		*item)
{
	struct xfs_rmap_intent		*rmap;

	rmap = container_of(item, struct xfs_rmap_intent, ri_list);
	kmem_free(rmap);
}

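/*
 * Deferred rmap update op type.  As a rough sketch of the lifecycle, the
 * deferred-ops machinery sorts queued intents (->diff_items), logs an RUI
 * covering them (->create_intent, ->log_item), then in a follow-up
 * transaction logs an RUD (->create_done) and applies each update
 * (->finish_item), caching the rmap btree cursor until ->finish_cleanup.
 * On failure, ->abort_intent releases the RUI and ->cancel_item frees any
 * unprocessed intents.
 */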
static const struct xfs_defer_op_type xfs_rmap_update_defer_type = {
	.type		= XFS_DEFER_OPS_TYPE_RMAP,
	.max_items	= XFS_RUI_MAX_FAST_EXTENTS,
	.diff_items	= xfs_rmap_update_diff_items,
	.create_intent	= xfs_rmap_update_create_intent,
	.abort_intent	= xfs_rmap_update_abort_intent,
	.log_item	= xfs_rmap_update_log_item,
	.create_done	= xfs_rmap_update_create_done,
	.finish_item	= xfs_rmap_update_finish_item,
	.finish_cleanup = xfs_rmap_update_finish_cleanup,
	.cancel_item	= xfs_rmap_update_cancel_item,
};

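/*
 * Note: registration is expected to happen once at module initialization
 * (typically from init_xfs_fs()) so the deferred-ops code knows about
 * rmap intents before any transaction defers rmap work.
 */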
/* Register the deferred op type. */
void
xfs_rmap_update_init_defer_op(void)
{
	xfs_defer_init_op_type(&xfs_rmap_update_defer_type);
}