dm bio prison: pass cell memory in
drivers/md/dm-bio-prison.c
/*
 * Copyright (C) 2012 Red Hat, Inc.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include "dm-bio-prison.h"

#include <linux/spinlock.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>

/*----------------------------------------------------------------*/

struct dm_bio_prison_cell {
        struct hlist_node list;
        struct dm_cell_key key;
        struct bio *holder;
        struct bio_list bios;
};

struct dm_bio_prison {
        spinlock_t lock;
        mempool_t *cell_pool;

        unsigned nr_buckets;
        unsigned hash_mask;
        struct hlist_head *cells;
};

/*----------------------------------------------------------------*/

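/*
 * Size the bucket array at roughly one bucket per four concurrent cells,
 * rounded up to a power of two and clamped to the range [128, 8192].
 */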
static uint32_t calc_nr_buckets(unsigned nr_cells)
{
        uint32_t n = 128;

        nr_cells /= 4;
        nr_cells = min(nr_cells, 8192u);

        while (n < nr_cells)
                n <<= 1;

        return n;
}

static struct kmem_cache *_cell_cache;

/*
 * @nr_cells should be the number of cells you want in use _concurrently_.
 * Don't confuse it with the number of distinct keys.
 */
struct dm_bio_prison *dm_bio_prison_create(unsigned nr_cells)
{
        unsigned i;
        uint32_t nr_buckets = calc_nr_buckets(nr_cells);
        size_t len = sizeof(struct dm_bio_prison) +
                (sizeof(struct hlist_head) * nr_buckets);
        struct dm_bio_prison *prison = kmalloc(len, GFP_KERNEL);

        if (!prison)
                return NULL;

        spin_lock_init(&prison->lock);
        prison->cell_pool = mempool_create_slab_pool(nr_cells, _cell_cache);
        if (!prison->cell_pool) {
                kfree(prison);
                return NULL;
        }

        prison->nr_buckets = nr_buckets;
        prison->hash_mask = nr_buckets - 1;
        prison->cells = (struct hlist_head *) (prison + 1);
        for (i = 0; i < nr_buckets; i++)
                INIT_HLIST_HEAD(prison->cells + i);

        return prison;
}
EXPORT_SYMBOL_GPL(dm_bio_prison_create);

void dm_bio_prison_destroy(struct dm_bio_prison *prison)
{
        mempool_destroy(prison->cell_pool);
        kfree(prison);
}
EXPORT_SYMBOL_GPL(dm_bio_prison_destroy);

struct dm_bio_prison_cell *dm_bio_prison_alloc_cell(struct dm_bio_prison *prison, gfp_t gfp)
{
        return mempool_alloc(prison->cell_pool, gfp);
}
EXPORT_SYMBOL_GPL(dm_bio_prison_alloc_cell);

void dm_bio_prison_free_cell(struct dm_bio_prison *prison,
                             struct dm_bio_prison_cell *cell)
{
        mempool_free(cell, prison->cell_pool);
}
EXPORT_SYMBOL_GPL(dm_bio_prison_free_cell);

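/*
 * Hash a key to a bucket index by multiplying its block number by a large
 * prime and masking with the bucket count (always a power of two).
 */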
static uint32_t hash_key(struct dm_bio_prison *prison, struct dm_cell_key *key)
{
        const unsigned long BIG_PRIME = 4294967291UL;
        uint64_t hash = key->block * BIG_PRIME;

        return (uint32_t) (hash & prison->hash_mask);
}

static int keys_equal(struct dm_cell_key *lhs, struct dm_cell_key *rhs)
{
        return (lhs->virtual == rhs->virtual) &&
               (lhs->dev == rhs->dev) &&
               (lhs->block == rhs->block);
}

static struct dm_bio_prison_cell *__search_bucket(struct hlist_head *bucket,
                                                  struct dm_cell_key *key)
{
        struct dm_bio_prison_cell *cell;

        hlist_for_each_entry(cell, bucket, list)
                if (keys_equal(&cell->key, key))
                        return cell;

        return NULL;
}

static void __setup_new_cell(struct dm_bio_prison *prison,
                             struct dm_cell_key *key,
                             struct bio *holder,
                             uint32_t hash,
                             struct dm_bio_prison_cell *cell)
{
        memcpy(&cell->key, key, sizeof(cell->key));
        cell->holder = holder;
        bio_list_init(&cell->bios);
        hlist_add_head(&cell->list, prison->cells + hash);
}

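/*
 * Returns 1 if a cell for @key already exists (@inmate, if supplied, is
 * queued behind the current holder), or 0 if @cell_prealloc was inserted
 * as a new cell with @inmate as its holder.
 */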
static int __bio_detain(struct dm_bio_prison *prison,
                        struct dm_cell_key *key,
                        struct bio *inmate,
                        struct dm_bio_prison_cell *cell_prealloc,
                        struct dm_bio_prison_cell **cell_result)
{
        uint32_t hash = hash_key(prison, key);
        struct dm_bio_prison_cell *cell;

        cell = __search_bucket(prison->cells + hash, key);
        if (cell) {
                if (inmate)
                        bio_list_add(&cell->bios, inmate);
                *cell_result = cell;
                return 1;
        }

        __setup_new_cell(prison, key, inmate, hash, cell_prealloc);
        *cell_result = cell_prealloc;
        return 0;
}

static int bio_detain(struct dm_bio_prison *prison,
                      struct dm_cell_key *key,
                      struct bio *inmate,
                      struct dm_bio_prison_cell *cell_prealloc,
                      struct dm_bio_prison_cell **cell_result)
{
        int r;
        unsigned long flags;

        spin_lock_irqsave(&prison->lock, flags);
        r = __bio_detain(prison, key, inmate, cell_prealloc, cell_result);
        spin_unlock_irqrestore(&prison->lock, flags);

        return r;
}

int dm_bio_detain(struct dm_bio_prison *prison,
                  struct dm_cell_key *key,
                  struct bio *inmate,
                  struct dm_bio_prison_cell *cell_prealloc,
                  struct dm_bio_prison_cell **cell_result)
{
        return bio_detain(prison, key, inmate, cell_prealloc, cell_result);
}
EXPORT_SYMBOL_GPL(dm_bio_detain);
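
/*
 * A sketch of the typical caller pattern (illustrative only; real users
 * such as dm-thin wrap this in their own helpers).  The preallocated
 * cell is only consumed when dm_bio_detain() returns 0:
 *
 *	cell_prealloc = dm_bio_prison_alloc_cell(prison, GFP_NOIO);
 *	r = dm_bio_detain(prison, &key, bio, cell_prealloc, &cell);
 *	if (r)
 *		dm_bio_prison_free_cell(prison, cell_prealloc);
 *	...
 *	dm_cell_release(prison, cell, &bios);
 */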

/*
 * @inmates must have been initialised prior to this call
 */
static void __cell_release(struct dm_bio_prison_cell *cell,
                           struct bio_list *inmates)
{
        hlist_del(&cell->list);

        if (inmates) {
                if (cell->holder)
                        bio_list_add(inmates, cell->holder);
                bio_list_merge(inmates, &cell->bios);
        }
}

void dm_cell_release(struct dm_bio_prison *prison,
                     struct dm_bio_prison_cell *cell,
                     struct bio_list *bios)
{
        unsigned long flags;

        spin_lock_irqsave(&prison->lock, flags);
        __cell_release(cell, bios);
        spin_unlock_irqrestore(&prison->lock, flags);
}
EXPORT_SYMBOL_GPL(dm_cell_release);

/*
 * Sometimes we don't want the holder, just the additional bios.
 */
static void __cell_release_no_holder(struct dm_bio_prison_cell *cell,
                                     struct bio_list *inmates)
{
        hlist_del(&cell->list);
        bio_list_merge(inmates, &cell->bios);
}

void dm_cell_release_no_holder(struct dm_bio_prison *prison,
                               struct dm_bio_prison_cell *cell,
                               struct bio_list *inmates)
{
        unsigned long flags;

        spin_lock_irqsave(&prison->lock, flags);
        __cell_release_no_holder(cell, inmates);
        spin_unlock_irqrestore(&prison->lock, flags);
}
EXPORT_SYMBOL_GPL(dm_cell_release_no_holder);

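/*
 * Release the cell and complete every bio it held (holder included)
 * with an I/O error.
 */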
void dm_cell_error(struct dm_bio_prison *prison,
                   struct dm_bio_prison_cell *cell)
{
        struct bio_list bios;
        struct bio *bio;
        unsigned long flags;

        bio_list_init(&bios);

        spin_lock_irqsave(&prison->lock, flags);
        __cell_release(cell, &bios);
        spin_unlock_irqrestore(&prison->lock, flags);

        while ((bio = bio_list_pop(&bios)))
                bio_io_error(bio);
}
EXPORT_SYMBOL_GPL(dm_cell_error);

/*----------------------------------------------------------------*/

#define DEFERRED_SET_SIZE 64

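/*
 * A deferred set is a fixed ring of reference-counted entries.  Callers
 * take a reference against the current entry around an in-flight
 * operation; work queued on an entry is only handed back once the
 * sweeper has seen that entry, and every earlier one, drop to a count
 * of zero.
 */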
struct dm_deferred_entry {
        struct dm_deferred_set *ds;
        unsigned count;
        struct list_head work_items;
};

struct dm_deferred_set {
        spinlock_t lock;
        unsigned current_entry;
        unsigned sweeper;
        struct dm_deferred_entry entries[DEFERRED_SET_SIZE];
};

struct dm_deferred_set *dm_deferred_set_create(void)
{
        int i;
        struct dm_deferred_set *ds;

        ds = kmalloc(sizeof(*ds), GFP_KERNEL);
        if (!ds)
                return NULL;

        spin_lock_init(&ds->lock);
        ds->current_entry = 0;
        ds->sweeper = 0;
        for (i = 0; i < DEFERRED_SET_SIZE; i++) {
                ds->entries[i].ds = ds;
                ds->entries[i].count = 0;
                INIT_LIST_HEAD(&ds->entries[i].work_items);
        }

        return ds;
}
EXPORT_SYMBOL_GPL(dm_deferred_set_create);

void dm_deferred_set_destroy(struct dm_deferred_set *ds)
{
        kfree(ds);
}
EXPORT_SYMBOL_GPL(dm_deferred_set_destroy);

struct dm_deferred_entry *dm_deferred_entry_inc(struct dm_deferred_set *ds)
{
        unsigned long flags;
        struct dm_deferred_entry *entry;

        spin_lock_irqsave(&ds->lock, flags);
        entry = ds->entries + ds->current_entry;
        entry->count++;
        spin_unlock_irqrestore(&ds->lock, flags);

        return entry;
}
EXPORT_SYMBOL_GPL(dm_deferred_entry_inc);

static unsigned ds_next(unsigned index)
{
        return (index + 1) % DEFERRED_SET_SIZE;
}

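/*
 * Advance the sweeper over any entries whose count has dropped to zero,
 * splicing their queued work onto @head for the caller to process.
 */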
static void __sweep(struct dm_deferred_set *ds, struct list_head *head)
{
        while ((ds->sweeper != ds->current_entry) &&
               !ds->entries[ds->sweeper].count) {
                list_splice_init(&ds->entries[ds->sweeper].work_items, head);
                ds->sweeper = ds_next(ds->sweeper);
        }

        if ((ds->sweeper == ds->current_entry) && !ds->entries[ds->sweeper].count)
                list_splice_init(&ds->entries[ds->sweeper].work_items, head);
}

void dm_deferred_entry_dec(struct dm_deferred_entry *entry, struct list_head *head)
{
        unsigned long flags;

        spin_lock_irqsave(&entry->ds->lock, flags);
        BUG_ON(!entry->count);
        --entry->count;
        __sweep(entry->ds, head);
        spin_unlock_irqrestore(&entry->ds->lock, flags);
}
EXPORT_SYMBOL_GPL(dm_deferred_entry_dec);

/*
 * Returns 1 if the work was deferred, or 0 if nothing is pending and the
 * caller may carry out the job immediately.
 */
int dm_deferred_set_add_work(struct dm_deferred_set *ds, struct list_head *work)
{
        int r = 1;
        unsigned long flags;
        unsigned next_entry;

        spin_lock_irqsave(&ds->lock, flags);
        if ((ds->sweeper == ds->current_entry) &&
            !ds->entries[ds->current_entry].count)
                r = 0;
        else {
                list_add(work, &ds->entries[ds->current_entry].work_items);
                next_entry = ds_next(ds->current_entry);
                if (!ds->entries[next_entry].count)
                        ds->current_entry = next_entry;
        }
        spin_unlock_irqrestore(&ds->lock, flags);

        return r;
}
EXPORT_SYMBOL_GPL(dm_deferred_set_add_work);
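
/*
 * A sketch of typical client usage (illustrative only; real users such
 * as dm-thin stash the entry pointer in their per-bio context, and the
 * work_items/job names below are just placeholders):
 *
 *	entry = dm_deferred_entry_inc(ds);
 *	... issue the I/O ...
 *	dm_deferred_entry_dec(entry, &work_items);
 *	... process any work spliced onto work_items ...
 *
 *	if (!dm_deferred_set_add_work(ds, &job->list))
 *		... nothing in flight, the job can run immediately ...
 */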

/*----------------------------------------------------------------*/

static int __init dm_bio_prison_init(void)
{
        _cell_cache = KMEM_CACHE(dm_bio_prison_cell, 0);
        if (!_cell_cache)
                return -ENOMEM;

        return 0;
}

static void __exit dm_bio_prison_exit(void)
{
        kmem_cache_destroy(_cell_cache);
        _cell_cache = NULL;
}

/*
 * module hooks
 */
module_init(dm_bio_prison_init);
module_exit(dm_bio_prison_exit);

MODULE_DESCRIPTION(DM_NAME " bio prison");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");