blk-mq: respect rq_affinity
block/blk-mq-sysfs.c [linux-2.6-microblaze.git]
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/smp.h>

#include <linux/blk-mq.h>
#include "blk-mq.h"
#include "blk-mq-tag.h"

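/*
 * The ctx and hctx kobjects are embedded in structures whose lifetime is
 * managed by the blk-mq core, so there is nothing to free here; an empty
 * release callback just silences the kobject core's warning about a
 * missing release function.
 */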
static void blk_mq_sysfs_release(struct kobject *kobj)
{
}

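/*
 * Attribute descriptors: each sysfs file is backed by one of these
 * entries, pairing the attribute with typed show/store handlers for
 * either a software queue (blk_mq_ctx) or a hardware queue
 * (blk_mq_hw_ctx).
 */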
struct blk_mq_ctx_sysfs_entry {
        struct attribute attr;
        ssize_t (*show)(struct blk_mq_ctx *, char *);
        ssize_t (*store)(struct blk_mq_ctx *, const char *, size_t);
};

struct blk_mq_hw_ctx_sysfs_entry {
        struct attribute attr;
        ssize_t (*show)(struct blk_mq_hw_ctx *, char *);
        ssize_t (*store)(struct blk_mq_hw_ctx *, const char *, size_t);
};

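/*
 * The four wrappers below adapt the generic sysfs_ops callbacks to the
 * typed handlers above: recover the entry and the ctx/hctx via
 * container_of(), then invoke the handler under q->sysfs_lock, bailing
 * out with -ENOENT if the queue is already marked dying.
 */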
static ssize_t blk_mq_sysfs_show(struct kobject *kobj, struct attribute *attr,
                                 char *page)
{
        struct blk_mq_ctx_sysfs_entry *entry;
        struct blk_mq_ctx *ctx;
        struct request_queue *q;
        ssize_t res;

        entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
        ctx = container_of(kobj, struct blk_mq_ctx, kobj);
        q = ctx->queue;

        if (!entry->show)
                return -EIO;

        res = -ENOENT;
        mutex_lock(&q->sysfs_lock);
        if (!blk_queue_dying(q))
                res = entry->show(ctx, page);
        mutex_unlock(&q->sysfs_lock);
        return res;
}

static ssize_t blk_mq_sysfs_store(struct kobject *kobj, struct attribute *attr,
                                  const char *page, size_t length)
{
        struct blk_mq_ctx_sysfs_entry *entry;
        struct blk_mq_ctx *ctx;
        struct request_queue *q;
        ssize_t res;

        entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
        ctx = container_of(kobj, struct blk_mq_ctx, kobj);
        q = ctx->queue;

        if (!entry->store)
                return -EIO;

        res = -ENOENT;
        mutex_lock(&q->sysfs_lock);
        if (!blk_queue_dying(q))
                res = entry->store(ctx, page, length);
        mutex_unlock(&q->sysfs_lock);
        return res;
}

static ssize_t blk_mq_hw_sysfs_show(struct kobject *kobj,
                                    struct attribute *attr, char *page)
{
        struct blk_mq_hw_ctx_sysfs_entry *entry;
        struct blk_mq_hw_ctx *hctx;
        struct request_queue *q;
        ssize_t res;

        entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
        hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
        q = hctx->queue;

        if (!entry->show)
                return -EIO;

        res = -ENOENT;
        mutex_lock(&q->sysfs_lock);
        if (!blk_queue_dying(q))
                res = entry->show(hctx, page);
        mutex_unlock(&q->sysfs_lock);
        return res;
}

static ssize_t blk_mq_hw_sysfs_store(struct kobject *kobj,
                                     struct attribute *attr, const char *page,
                                     size_t length)
{
        struct blk_mq_hw_ctx_sysfs_entry *entry;
        struct blk_mq_hw_ctx *hctx;
        struct request_queue *q;
        ssize_t res;

        entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
        hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
        q = hctx->queue;

        if (!entry->store)
                return -EIO;

        res = -ENOENT;
        mutex_lock(&q->sysfs_lock);
        if (!blk_queue_dying(q))
                res = entry->store(hctx, page, length);
        mutex_unlock(&q->sysfs_lock);
        return res;
}

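/*
 * Per-ctx counters.  The two-column files print the sync count ([1])
 * followed by the async count ([0]), matching how the blk-mq core
 * indexes these arrays by rw_is_sync().
 */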
static ssize_t blk_mq_sysfs_dispatched_show(struct blk_mq_ctx *ctx, char *page)
{
        return sprintf(page, "%lu %lu\n", ctx->rq_dispatched[1],
                                ctx->rq_dispatched[0]);
}

static ssize_t blk_mq_sysfs_merged_show(struct blk_mq_ctx *ctx, char *page)
{
        return sprintf(page, "%lu\n", ctx->rq_merged);
}

static ssize_t blk_mq_sysfs_completed_show(struct blk_mq_ctx *ctx, char *page)
{
        return sprintf(page, "%lu %lu\n", ctx->rq_completed[1],
                                ctx->rq_completed[0]);
}

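/*
 * Debugging helper: dump the request pointers on a pending list under a
 * caption.  Callers are expected to hold the lock protecting the list.
 */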
static ssize_t sysfs_list_show(char *page, struct list_head *list,
                               const char *msg)
{
        char *start_page = page;
        struct request *rq;

        page += sprintf(page, "%s:\n", msg);

        list_for_each_entry(rq, list, queuelist)
                page += sprintf(page, "\t%p\n", rq);

        return page - start_page;
}

static ssize_t blk_mq_sysfs_rq_list_show(struct blk_mq_ctx *ctx, char *page)
{
        ssize_t ret;

        spin_lock(&ctx->lock);
        ret = sysfs_list_show(page, &ctx->rq_list, "CTX pending");
        spin_unlock(&ctx->lock);

        return ret;
}

static ssize_t blk_mq_hw_sysfs_queued_show(struct blk_mq_hw_ctx *hctx,
                                           char *page)
{
        return sprintf(page, "%lu\n", hctx->queued);
}

static ssize_t blk_mq_hw_sysfs_run_show(struct blk_mq_hw_ctx *hctx, char *page)
{
        return sprintf(page, "%lu\n", hctx->run);
}

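/*
 * Histogram of how many requests each queue run dispatched: bucket 0
 * counts runs that dispatched nothing, and bucket i is labelled with
 * 2^(i-1), the lower bound of the batch sizes it covers.
 */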
static ssize_t blk_mq_hw_sysfs_dispatched_show(struct blk_mq_hw_ctx *hctx,
                                               char *page)
{
        char *start_page = page;
        int i;

        page += sprintf(page, "%8u\t%lu\n", 0U, hctx->dispatched[0]);

        for (i = 1; i < BLK_MQ_MAX_DISPATCH_ORDER; i++) {
                unsigned long d = 1U << (i - 1);

                page += sprintf(page, "%8lu\t%lu\n", d, hctx->dispatched[i]);
        }

        return page - start_page;
}

static ssize_t blk_mq_hw_sysfs_rq_list_show(struct blk_mq_hw_ctx *hctx,
                                            char *page)
{
        ssize_t ret;

        spin_lock(&hctx->lock);
        ret = sysfs_list_show(page, &hctx->dispatch, "HCTX pending");
        spin_unlock(&hctx->lock);

        return ret;
}

static ssize_t blk_mq_hw_sysfs_tags_show(struct blk_mq_hw_ctx *hctx, char *page)
{
        return blk_mq_tag_sysfs_show(hctx->tags, page);
}

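/*
 * Print the CPUs this hardware queue serves as a comma-separated list.
 * CPU hotplug is disabled across the walk so the mapping cannot change
 * underneath us.
 */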
static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page)
{
        unsigned int i, first = 1;
        ssize_t ret = 0;

        blk_mq_disable_hotplug();

        for_each_cpu(i, hctx->cpumask) {
                if (first)
                        ret += sprintf(page + ret, "%u", i);
                else
                        ret += sprintf(page + ret, ", %u", i);

                first = 0;
        }

        blk_mq_enable_hotplug();

        ret += sprintf(page + ret, "\n");
        return ret;
}

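/*
 * All attributes below are read-only (S_IRUGO) and, once the disk is
 * registered, appear under /sys/block/<disk>/mq/<n>/ for hardware queues
 * and /sys/block/<disk>/mq/<n>/cpu<m>/ for software queues.
 */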
static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_dispatched = {
        .attr = {.name = "dispatched", .mode = S_IRUGO },
        .show = blk_mq_sysfs_dispatched_show,
};
static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_merged = {
        .attr = {.name = "merged", .mode = S_IRUGO },
        .show = blk_mq_sysfs_merged_show,
};
static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_completed = {
        .attr = {.name = "completed", .mode = S_IRUGO },
        .show = blk_mq_sysfs_completed_show,
};
static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_rq_list = {
        .attr = {.name = "rq_list", .mode = S_IRUGO },
        .show = blk_mq_sysfs_rq_list_show,
};

static struct attribute *default_ctx_attrs[] = {
        &blk_mq_sysfs_dispatched.attr,
        &blk_mq_sysfs_merged.attr,
        &blk_mq_sysfs_completed.attr,
        &blk_mq_sysfs_rq_list.attr,
        NULL,
};

static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_queued = {
        .attr = {.name = "queued", .mode = S_IRUGO },
        .show = blk_mq_hw_sysfs_queued_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_run = {
        .attr = {.name = "run", .mode = S_IRUGO },
        .show = blk_mq_hw_sysfs_run_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_dispatched = {
        .attr = {.name = "dispatched", .mode = S_IRUGO },
        .show = blk_mq_hw_sysfs_dispatched_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_pending = {
        .attr = {.name = "pending", .mode = S_IRUGO },
        .show = blk_mq_hw_sysfs_rq_list_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_tags = {
        .attr = {.name = "tags", .mode = S_IRUGO },
        .show = blk_mq_hw_sysfs_tags_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_cpus = {
        .attr = {.name = "cpu_list", .mode = S_IRUGO },
        .show = blk_mq_hw_sysfs_cpus_show,
};

static struct attribute *default_hw_ctx_attrs[] = {
        &blk_mq_hw_sysfs_queued.attr,
        &blk_mq_hw_sysfs_run.attr,
        &blk_mq_hw_sysfs_dispatched.attr,
        &blk_mq_hw_sysfs_pending.attr,
        &blk_mq_hw_sysfs_tags.attr,
        &blk_mq_hw_sysfs_cpus.attr,
        NULL,
};

static const struct sysfs_ops blk_mq_sysfs_ops = {
        .show   = blk_mq_sysfs_show,
        .store  = blk_mq_sysfs_store,
};

static const struct sysfs_ops blk_mq_hw_sysfs_ops = {
        .show   = blk_mq_hw_sysfs_show,
        .store  = blk_mq_hw_sysfs_store,
};

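/*
 * kobj_type wiring: the bare blk_mq_ktype backs the "mq" directory
 * itself, while the ctx and hctx types additionally carry their default
 * attribute lists.
 */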
static struct kobj_type blk_mq_ktype = {
        .sysfs_ops      = &blk_mq_sysfs_ops,
        .release        = blk_mq_sysfs_release,
};

static struct kobj_type blk_mq_ctx_ktype = {
        .sysfs_ops      = &blk_mq_sysfs_ops,
        .default_attrs  = default_ctx_attrs,
        .release        = blk_mq_sysfs_release,
};

static struct kobj_type blk_mq_hw_ktype = {
        .sysfs_ops      = &blk_mq_hw_sysfs_ops,
        .default_attrs  = default_hw_ctx_attrs,
        .release        = blk_mq_sysfs_release,
};

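/*
 * Teardown mirrors registration in reverse: ctx children are removed
 * before their hctx parent, and the "mq" directory last.  Each
 * kobject_del() is paired with a kobject_put() to drop the reference
 * taken at kobject_init(); the final put releases the reference on the
 * disk device taken during registration.
 */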
void blk_mq_unregister_disk(struct gendisk *disk)
{
        struct request_queue *q = disk->queue;
        struct blk_mq_hw_ctx *hctx;
        struct blk_mq_ctx *ctx;
        int i, j;

        queue_for_each_hw_ctx(q, hctx, i) {
                hctx_for_each_ctx(hctx, ctx, j) {
                        kobject_del(&ctx->kobj);
                        kobject_put(&ctx->kobj);
                }
                kobject_del(&hctx->kobj);
                kobject_put(&hctx->kobj);
        }

        kobject_uevent(&q->mq_kobj, KOBJ_REMOVE);
        kobject_del(&q->mq_kobj);
        kobject_put(&q->mq_kobj);

        kobject_put(&disk_to_dev(disk)->kobj);
}

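/*
 * Build the sysfs hierarchy for a blk-mq disk: a "mq" directory under
 * the disk device (holding a reference on it), one numbered directory
 * per hardware queue, and a "cpu<n>" directory per software queue mapped
 * to it.  On any failure the partial hierarchy is torn down via
 * blk_mq_unregister_disk().
 */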
int blk_mq_register_disk(struct gendisk *disk)
{
        struct device *dev = disk_to_dev(disk);
        struct request_queue *q = disk->queue;
        struct blk_mq_hw_ctx *hctx;
        struct blk_mq_ctx *ctx;
        int ret, i, j;

        kobject_init(&q->mq_kobj, &blk_mq_ktype);

        ret = kobject_add(&q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq");
        if (ret < 0)
                return ret;

        kobject_uevent(&q->mq_kobj, KOBJ_ADD);

        queue_for_each_hw_ctx(q, hctx, i) {
                kobject_init(&hctx->kobj, &blk_mq_hw_ktype);
                ret = kobject_add(&hctx->kobj, &q->mq_kobj, "%u", i);
                if (ret)
                        break;

                if (!hctx->nr_ctx)
                        continue;

                hctx_for_each_ctx(hctx, ctx, j) {
                        kobject_init(&ctx->kobj, &blk_mq_ctx_ktype);
                        ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu);
                        if (ret)
                                break;
                }

                /* don't let the next hctx overwrite a ctx registration error */
                if (ret)
                        break;
        }

        if (ret) {
                blk_mq_unregister_disk(disk);
                return ret;
        }

        return 0;
}