// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2006 Jens Axboe <axboe@kernel.dk>
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/blktrace_api.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/time.h>
#include <linux/uaccess.h>
#include <linux/list.h>
#include <linux/blk-cgroup.h>

#include "../../block/blk.h"

#include <trace/events/block.h>

#include "trace_output.h"

#ifdef CONFIG_BLK_DEV_IO_TRACE

static unsigned int blktrace_seq __read_mostly = 1;

static struct trace_array *blk_tr;
static bool blk_tracer_enabled __read_mostly;

static LIST_HEAD(running_trace_list);
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(running_trace_lock);

/* Select an alternative, minimalistic output instead of the original one */
#define TRACE_BLK_OPT_CLASSIC	0x1
#define TRACE_BLK_OPT_CGROUP	0x2
#define TRACE_BLK_OPT_CGNAME	0x4

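/*
 * Example (a sketch, not from the original source): while the "blk" tracer
 * is the current tracer, these flags are exposed as tracefs options, so the
 * classic output could be enabled with something like:
 *
 *	echo blk > /sys/kernel/debug/tracing/current_tracer
 *	echo 1 > /sys/kernel/debug/tracing/options/blk_classic
 */
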
static struct tracer_opt blk_tracer_opts[] = {
	/* The minimalistic output is disabled by default */
	{ TRACER_OPT(blk_classic, TRACE_BLK_OPT_CLASSIC) },
#ifdef CONFIG_BLK_CGROUP
	{ TRACER_OPT(blk_cgroup, TRACE_BLK_OPT_CGROUP) },
	{ TRACER_OPT(blk_cgname, TRACE_BLK_OPT_CGNAME) },
#endif
	{ }
};

static struct tracer_flags blk_tracer_flags = {
	.val  = 0,
	.opts = blk_tracer_opts,
};

/* Global reference count of probes */
static DEFINE_MUTEX(blk_probe_mutex);
static int blk_probes_ref;

static void blk_register_tracepoints(void);
static void blk_unregister_tracepoints(void);

/*
 * Send out a notify message.
 */
static void trace_note(struct blk_trace *bt, pid_t pid, int action,
		       const void *data, size_t len, u64 cgid)
{
	struct blk_io_trace *t;
	struct ring_buffer_event *event = NULL;
	struct trace_buffer *buffer = NULL;
	unsigned int trace_ctx = 0;
	int cpu = smp_processor_id();
	bool blk_tracer = blk_tracer_enabled;
	ssize_t cgid_len = cgid ? sizeof(cgid) : 0;

	if (blk_tracer) {
		buffer = blk_tr->array_buffer.buffer;
		trace_ctx = tracing_gen_ctx_flags(0);
		event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
						  sizeof(*t) + len + cgid_len,
						  trace_ctx);
		if (!event)
			return;
		t = ring_buffer_event_data(event);
		goto record_it;
	}

	if (!bt->rchan)
		return;

	t = relay_reserve(bt->rchan, sizeof(*t) + len + cgid_len);
	if (t) {
		t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
		t->time = ktime_to_ns(ktime_get());
record_it:
		t->device = bt->dev;
		t->action = action | (cgid ? __BLK_TN_CGROUP : 0);
		t->pid = pid;
		t->cpu = cpu;
		t->pdu_len = len + cgid_len;
		if (cgid_len)
			memcpy((void *)t + sizeof(*t), &cgid, cgid_len);
		memcpy((void *) t + sizeof(*t) + cgid_len, data, len);

		if (blk_tracer)
			trace_buffer_unlock_commit(blk_tr, buffer, event, trace_ctx);
	}
}

/*
 * Send out a notify for this process, if we haven't done so since a trace
 * started
 */
static void trace_note_tsk(struct task_struct *tsk)
{
	unsigned long flags;
	struct blk_trace *bt;

	tsk->btrace_seq = blktrace_seq;
	spin_lock_irqsave(&running_trace_lock, flags);
	list_for_each_entry(bt, &running_trace_list, running_list) {
		trace_note(bt, tsk->pid, BLK_TN_PROCESS, tsk->comm,
			   sizeof(tsk->comm), 0);
	}
	spin_unlock_irqrestore(&running_trace_lock, flags);
}

static void trace_note_time(struct blk_trace *bt)
{
	struct timespec64 now;
	unsigned long flags;
	u32 words[2];

	/* need to check user space to see if this breaks in y2038 or y2106 */
	ktime_get_real_ts64(&now);
	words[0] = (u32)now.tv_sec;
	words[1] = now.tv_nsec;

	local_irq_save(flags);
	trace_note(bt, 0, BLK_TN_TIMESTAMP, words, sizeof(words), 0);
	local_irq_restore(flags);
}

void __trace_note_message(struct blk_trace *bt, struct blkcg *blkcg,
	const char *fmt, ...)
{
	int n;
	va_list args;
	unsigned long flags;
	char *buf;

	if (unlikely(bt->trace_state != Blktrace_running &&
		     !blk_tracer_enabled))
		return;

	/*
	 * If the BLK_TC_NOTIFY action mask isn't set, don't send any note
	 * message to the trace.
	 */
	if (!(bt->act_mask & BLK_TC_NOTIFY))
		return;

	local_irq_save(flags);
	buf = this_cpu_ptr(bt->msg_data);
	va_start(args, fmt);
	n = vscnprintf(buf, BLK_TN_MAX_MSG, fmt, args);
	va_end(args);

	if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CGROUP))
		blkcg = NULL;
#ifdef CONFIG_BLK_CGROUP
	trace_note(bt, current->pid, BLK_TN_MESSAGE, buf, n,
		   blkcg ? cgroup_id(blkcg->css.cgroup) : 1);
#else
	trace_note(bt, current->pid, BLK_TN_MESSAGE, buf, n, 0);
#endif
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(__trace_note_message);

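/*
 * Example (a sketch, not part of this file): drivers normally reach
 * __trace_note_message() through the blk_add_trace_msg() wrapper from
 * <linux/blktrace_api.h>; "depth" here is a hypothetical local:
 *
 *	blk_add_trace_msg(q, "queue depth now %u", depth);
 *
 * The note then appears in the trace stream as a message ('m') event.
 */
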
static int act_log_check(struct blk_trace *bt, u32 what, sector_t sector,
			 pid_t pid)
{
	if (((bt->act_mask << BLK_TC_SHIFT) & what) == 0)
		return 1;
	if (sector && (sector < bt->start_lba || sector > bt->end_lba))
		return 1;
	if (bt->pid && pid != bt->pid)
		return 1;

	return 0;
}

/*
 * Data direction bit lookup
 */
static const u32 ddir_act[2] = { BLK_TC_ACT(BLK_TC_READ),
				 BLK_TC_ACT(BLK_TC_WRITE) };

#define BLK_TC_RAHEAD		BLK_TC_AHEAD
#define BLK_TC_PREFLUSH		BLK_TC_FLUSH

/* The ilog2() calls fall out because they're constant */
#define MASK_TC_BIT(rw, __name) ((rw & REQ_ ## __name) << \
	  (ilog2(BLK_TC_ ## __name) + BLK_TC_SHIFT - __REQ_ ## __name))

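/*
 * Worked example (added for clarity): MASK_TC_BIT(op_flags, SYNC) takes the
 * REQ_SYNC bit (bit __REQ_SYNC of op_flags) and shifts it so that it lands
 * at bit ilog2(BLK_TC_SYNC) + BLK_TC_SHIFT, i.e. exactly
 * BLK_TC_ACT(BLK_TC_SYNC) - a branchless flag-to-category relocation.
 */
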
/*
 * The worker for the various blk_add_trace*() types. Fills out a
 * blk_io_trace structure and places it in a per-cpu subbuffer.
 */
static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
		     int op, int op_flags, u32 what, int error, int pdu_len,
		     void *pdu_data, u64 cgid)
{
	struct task_struct *tsk = current;
	struct ring_buffer_event *event = NULL;
	struct trace_buffer *buffer = NULL;
	struct blk_io_trace *t;
	unsigned long flags = 0;
	unsigned long *sequence;
	unsigned int trace_ctx = 0;
	pid_t pid;
	int cpu;
	bool blk_tracer = blk_tracer_enabled;
	ssize_t cgid_len = cgid ? sizeof(cgid) : 0;

	if (unlikely(bt->trace_state != Blktrace_running && !blk_tracer))
		return;

	what |= ddir_act[op_is_write(op) ? WRITE : READ];
	what |= MASK_TC_BIT(op_flags, SYNC);
	what |= MASK_TC_BIT(op_flags, RAHEAD);
	what |= MASK_TC_BIT(op_flags, META);
	what |= MASK_TC_BIT(op_flags, PREFLUSH);
	what |= MASK_TC_BIT(op_flags, FUA);
	if (op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE)
		what |= BLK_TC_ACT(BLK_TC_DISCARD);
	if (op == REQ_OP_FLUSH)
		what |= BLK_TC_ACT(BLK_TC_FLUSH);
	if (cgid)
		what |= __BLK_TA_CGROUP;

	pid = tsk->pid;
	if (act_log_check(bt, what, sector, pid))
		return;
	cpu = raw_smp_processor_id();

	if (blk_tracer) {
		tracing_record_cmdline(current);

		buffer = blk_tr->array_buffer.buffer;
		trace_ctx = tracing_gen_ctx_flags(0);
		event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
						  sizeof(*t) + pdu_len + cgid_len,
						  trace_ctx);
		if (!event)
			return;
		t = ring_buffer_event_data(event);
		goto record_it;
	}

	if (unlikely(tsk->btrace_seq != blktrace_seq))
		trace_note_tsk(tsk);

	/*
	 * A word about the locking here - we disable interrupts to reserve
	 * some space in the relay per-cpu buffer, to prevent an irq
	 * from coming in and stepping on our toes.
	 */
	local_irq_save(flags);
	t = relay_reserve(bt->rchan, sizeof(*t) + pdu_len + cgid_len);
	if (t) {
		sequence = per_cpu_ptr(bt->sequence, cpu);

		t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
		t->sequence = ++(*sequence);
		t->time = ktime_to_ns(ktime_get());
record_it:
		/*
		 * These two are not needed in ftrace as they are in the
		 * generic trace_entry, filled by tracing_generic_entry_update,
		 * but for the trace_event->bin() synthesizer benefit we do it
		 * here too.
		 */
		t->cpu = cpu;
		t->pid = pid;

		t->sector = sector;
		t->bytes = bytes;
		t->action = what;
		t->device = bt->dev;
		t->error = error;
		t->pdu_len = pdu_len + cgid_len;

		if (cgid_len)
			memcpy((void *)t + sizeof(*t), &cgid, cgid_len);
		if (pdu_len)
			memcpy((void *)t + sizeof(*t) + cgid_len, pdu_data, pdu_len);

		if (blk_tracer) {
			trace_buffer_unlock_commit(blk_tr, buffer, event, trace_ctx);
			return;
		}
	}

	local_irq_restore(flags);
}

static void blk_trace_free(struct blk_trace *bt)
{
	relay_close(bt->rchan);
	debugfs_remove(bt->dir);
	free_percpu(bt->sequence);
	free_percpu(bt->msg_data);
	kfree(bt);
}

static void get_probe_ref(void)
{
	mutex_lock(&blk_probe_mutex);
	if (++blk_probes_ref == 1)
		blk_register_tracepoints();
	mutex_unlock(&blk_probe_mutex);
}

static void put_probe_ref(void)
{
	mutex_lock(&blk_probe_mutex);
	if (!--blk_probes_ref)
		blk_unregister_tracepoints();
	mutex_unlock(&blk_probe_mutex);
}

static void blk_trace_cleanup(struct blk_trace *bt)
{
	synchronize_rcu();
	blk_trace_free(bt);
	put_probe_ref();
}

static int __blk_trace_remove(struct request_queue *q)
{
	struct blk_trace *bt;

	bt = rcu_replace_pointer(q->blk_trace, NULL,
				 lockdep_is_held(&q->debugfs_mutex));
	if (!bt)
		return -EINVAL;

	if (bt->trace_state != Blktrace_running)
		blk_trace_cleanup(bt);

	return 0;
}

int blk_trace_remove(struct request_queue *q)
{
	int ret;

	mutex_lock(&q->debugfs_mutex);
	ret = __blk_trace_remove(q);
	mutex_unlock(&q->debugfs_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(blk_trace_remove);

static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
				size_t count, loff_t *ppos)
{
	struct blk_trace *bt = filp->private_data;
	char buf[16];

	snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));

	return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
}

static const struct file_operations blk_dropped_fops = {
	.owner =	THIS_MODULE,
	.open =		simple_open,
	.read =		blk_dropped_read,
	.llseek =	default_llseek,
};

static ssize_t blk_msg_write(struct file *filp, const char __user *buffer,
				size_t count, loff_t *ppos)
{
	char *msg;
	struct blk_trace *bt;

	if (count >= BLK_TN_MAX_MSG)
		return -EINVAL;

	msg = memdup_user_nul(buffer, count);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	bt = filp->private_data;
	__trace_note_message(bt, NULL, "%s", msg);
	kfree(msg);

	return count;
}

static const struct file_operations blk_msg_fops = {
	.owner =	THIS_MODULE,
	.open =		simple_open,
	.write =	blk_msg_write,
	.llseek =	noop_llseek,
};

/*
 * Keep track of how many times we encountered a full subbuffer, to aid
 * the user space app in telling how many lost events there were.
 */
static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
				     void *prev_subbuf, size_t prev_padding)
{
	struct blk_trace *bt;

	if (!relay_buf_full(buf))
		return 1;

	bt = buf->chan->private_data;
	atomic_inc(&bt->dropped);
	return 0;
}

static int blk_remove_buf_file_callback(struct dentry *dentry)
{
	debugfs_remove(dentry);

	return 0;
}

static struct dentry *blk_create_buf_file_callback(const char *filename,
						   struct dentry *parent,
						   umode_t mode,
						   struct rchan_buf *buf,
						   int *is_global)
{
	return debugfs_create_file(filename, mode, parent, buf,
					&relay_file_operations);
}

static const struct rchan_callbacks blk_relay_callbacks = {
	.subbuf_start		= blk_subbuf_start_callback,
	.create_buf_file	= blk_create_buf_file_callback,
	.remove_buf_file	= blk_remove_buf_file_callback,
};

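/*
 * Note (added): via these callbacks, relay_open("trace", ...) ends up
 * creating one per-cpu "trace0" .. "traceN" file under the trace's debugfs
 * directory; those are the files blktrace(8) reads.
 */
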
static void blk_trace_setup_lba(struct blk_trace *bt,
				struct block_device *bdev)
{
	if (bdev) {
		bt->start_lba = bdev->bd_start_sect;
		bt->end_lba = bdev->bd_start_sect + bdev_nr_sectors(bdev);
	} else {
		bt->start_lba = 0;
		bt->end_lba = -1ULL;
	}
}

/*
 * Setup everything required to start tracing
 */
static int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
			      struct block_device *bdev,
			      struct blk_user_trace_setup *buts)
{
	struct blk_trace *bt = NULL;
	struct dentry *dir = NULL;
	int ret;

	lockdep_assert_held(&q->debugfs_mutex);

	if (!buts->buf_size || !buts->buf_nr)
		return -EINVAL;

	strncpy(buts->name, name, BLKTRACE_BDEV_SIZE);
	buts->name[BLKTRACE_BDEV_SIZE - 1] = '\0';

	/*
	 * some device names have larger paths - convert the slashes
	 * to underscores for this to work as expected
	 */
	strreplace(buts->name, '/', '_');

	/*
	 * bdev can be NULL, as with scsi-generic; be as helpful as
	 * we can.
	 */
	if (rcu_dereference_protected(q->blk_trace,
				      lockdep_is_held(&q->debugfs_mutex))) {
		pr_warn("Concurrent blktraces are not allowed on %s\n",
			buts->name);
		return -EBUSY;
	}

	bt = kzalloc(sizeof(*bt), GFP_KERNEL);
	if (!bt)
		return -ENOMEM;

	ret = -ENOMEM;
	bt->sequence = alloc_percpu(unsigned long);
	if (!bt->sequence)
		goto err;

	bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char));
	if (!bt->msg_data)
		goto err;

	/*
	 * When tracing the whole disk reuse the existing debugfs directory
	 * created by the block layer on init. For partitions block devices,
	 * and scsi-generic block devices we create a temporary new debugfs
	 * directory that will be removed once the trace ends.
	 */
	if (bdev && !bdev_is_partition(bdev))
		dir = q->debugfs_dir;
	else
		bt->dir = dir = debugfs_create_dir(buts->name, blk_debugfs_root);

	/*
	 * As blktrace relies on debugfs for its interface the debugfs directory
	 * is required, contrary to the usual mantra of not checking for debugfs
	 * files or directories.
	 */
	if (IS_ERR_OR_NULL(dir)) {
		pr_warn("debugfs_dir not present for %s so skipping\n",
			buts->name);
		ret = -ENOENT;
		goto err;
	}

	bt->dev = dev;
	atomic_set(&bt->dropped, 0);
	INIT_LIST_HEAD(&bt->running_list);

	ret = -EIO;
	debugfs_create_file("dropped", 0444, dir, bt, &blk_dropped_fops);
	debugfs_create_file("msg", 0222, dir, bt, &blk_msg_fops);

	bt->rchan = relay_open("trace", dir, buts->buf_size,
				buts->buf_nr, &blk_relay_callbacks, bt);
	if (!bt->rchan)
		goto err;

	bt->act_mask = buts->act_mask;
	if (!bt->act_mask)
		bt->act_mask = (u16) -1;

	blk_trace_setup_lba(bt, bdev);

	/* overwrite with user settings */
	if (buts->start_lba)
		bt->start_lba = buts->start_lba;
	if (buts->end_lba)
		bt->end_lba = buts->end_lba;

	bt->pid = buts->pid;
	bt->trace_state = Blktrace_setup;

	rcu_assign_pointer(q->blk_trace, bt);
	get_probe_ref();

	ret = 0;
err:
	if (ret)
		blk_trace_free(bt);
	return ret;
}

static int __blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
			     struct block_device *bdev, char __user *arg)
{
	struct blk_user_trace_setup buts;
	int ret;

	ret = copy_from_user(&buts, arg, sizeof(buts));
	if (ret)
		return -EFAULT;

	ret = do_blk_trace_setup(q, name, dev, bdev, &buts);
	if (ret)
		return ret;

	if (copy_to_user(arg, &buts, sizeof(buts))) {
		__blk_trace_remove(q);
		return -EFAULT;
	}
	return 0;
}

int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
		    struct block_device *bdev,
		    char __user *arg)
{
	int ret;

	mutex_lock(&q->debugfs_mutex);
	ret = __blk_trace_setup(q, name, dev, bdev, arg);
	mutex_unlock(&q->debugfs_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(blk_trace_setup);

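/*
 * Illustrative userspace sketch (not part of the kernel source): roughly how
 * blktrace(8) drives the setup path above. Values are example choices and
 * error handling is omitted.
 *
 *	struct blk_user_trace_setup buts = {
 *		.buf_size = 512 * 1024,		// bytes per relay subbuffer
 *		.buf_nr   = 4,			// number of subbuffers
 *		.act_mask = 0,			// 0 -> trace all actions
 *	};
 *	int fd = open("/dev/sda", O_RDONLY | O_NONBLOCK);
 *
 *	ioctl(fd, BLKTRACESETUP, &buts);	// creates the debugfs files
 *	ioctl(fd, BLKTRACESTART);		// state -> Blktrace_running
 *	// ... consume /sys/kernel/debug/block/sda/trace<cpu> ...
 *	ioctl(fd, BLKTRACESTOP);
 *	ioctl(fd, BLKTRACETEARDOWN);
 */
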
#if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64)
static int compat_blk_trace_setup(struct request_queue *q, char *name,
				  dev_t dev, struct block_device *bdev,
				  char __user *arg)
{
	struct blk_user_trace_setup buts;
	struct compat_blk_user_trace_setup cbuts;
	int ret;

	if (copy_from_user(&cbuts, arg, sizeof(cbuts)))
		return -EFAULT;

	buts = (struct blk_user_trace_setup) {
		.act_mask = cbuts.act_mask,
		.buf_size = cbuts.buf_size,
		.buf_nr = cbuts.buf_nr,
		.start_lba = cbuts.start_lba,
		.end_lba = cbuts.end_lba,
		.pid = cbuts.pid,
	};

	ret = do_blk_trace_setup(q, name, dev, bdev, &buts);
	if (ret)
		return ret;

	if (copy_to_user(arg, &buts.name, ARRAY_SIZE(buts.name))) {
		__blk_trace_remove(q);
		return -EFAULT;
	}

	return 0;
}
#endif

static int __blk_trace_startstop(struct request_queue *q, int start)
{
	int ret;
	struct blk_trace *bt;

	bt = rcu_dereference_protected(q->blk_trace,
				       lockdep_is_held(&q->debugfs_mutex));
	if (bt == NULL)
		return -EINVAL;

	/*
	 * For starting a trace, we can transition from a setup or stopped
	 * trace. For stopping a trace, the state must be running
	 */
	ret = -EINVAL;
	if (start) {
		if (bt->trace_state == Blktrace_setup ||
		    bt->trace_state == Blktrace_stopped) {
			blktrace_seq++;
			smp_mb();
			bt->trace_state = Blktrace_running;
			spin_lock_irq(&running_trace_lock);
			list_add(&bt->running_list, &running_trace_list);
			spin_unlock_irq(&running_trace_lock);

			trace_note_time(bt);
			ret = 0;
		}
	} else {
		if (bt->trace_state == Blktrace_running) {
			bt->trace_state = Blktrace_stopped;
			spin_lock_irq(&running_trace_lock);
			list_del_init(&bt->running_list);
			spin_unlock_irq(&running_trace_lock);
			relay_flush(bt->rchan);
			ret = 0;
		}
	}

	return ret;
}

int blk_trace_startstop(struct request_queue *q, int start)
{
	int ret;

	mutex_lock(&q->debugfs_mutex);
	ret = __blk_trace_startstop(q, start);
	mutex_unlock(&q->debugfs_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(blk_trace_startstop);

/*
 * When reading or writing the blktrace sysfs files, the references to the
 * opened sysfs or device files should prevent the underlying block device
 * from being removed. So no further delete protection is really needed.
 */

/**
 * blk_trace_ioctl - handle the ioctls associated with tracing
 * @bdev:	the block device
 * @cmd:	the ioctl cmd
 * @arg:	the argument data, if any
 *
 **/
int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
{
	struct request_queue *q;
	int ret, start = 0;
	char b[BDEVNAME_SIZE];

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	mutex_lock(&q->debugfs_mutex);

	switch (cmd) {
	case BLKTRACESETUP:
		bdevname(bdev, b);
		ret = __blk_trace_setup(q, b, bdev->bd_dev, bdev, arg);
		break;
#if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64)
	case BLKTRACESETUP32:
		bdevname(bdev, b);
		ret = compat_blk_trace_setup(q, b, bdev->bd_dev, bdev, arg);
		break;
#endif
	case BLKTRACESTART:
		start = 1;
		fallthrough;
	case BLKTRACESTOP:
		ret = __blk_trace_startstop(q, start);
		break;
	case BLKTRACETEARDOWN:
		ret = __blk_trace_remove(q);
		break;
	default:
		ret = -ENOTTY;
		break;
	}

	mutex_unlock(&q->debugfs_mutex);
	return ret;
}

/**
 * blk_trace_shutdown - stop and cleanup trace structures
 * @q:    the request queue associated with the device
 *
 **/
void blk_trace_shutdown(struct request_queue *q)
{
	mutex_lock(&q->debugfs_mutex);
	if (rcu_dereference_protected(q->blk_trace,
				      lockdep_is_held(&q->debugfs_mutex))) {
		__blk_trace_startstop(q, 0);
		__blk_trace_remove(q);
	}

	mutex_unlock(&q->debugfs_mutex);
}

#ifdef CONFIG_BLK_CGROUP
static u64 blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio)
{
	struct blk_trace *bt;

	/* We don't use the 'bt' value here except as an optimization... */
	bt = rcu_dereference_protected(q->blk_trace, 1);
	if (!bt || !(blk_tracer_flags.val & TRACE_BLK_OPT_CGROUP))
		return 0;

	if (!bio->bi_blkg)
		return 0;
	return cgroup_id(bio_blkcg(bio)->css.cgroup);
}
#else
static u64 blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio)
{
	return 0;
}
#endif

static u64
blk_trace_request_get_cgid(struct request *rq)
{
	if (!rq->bio)
		return 0;
	/* Use the first bio */
	return blk_trace_bio_get_cgid(rq->q, rq->bio);
}

/**
 * blk_add_trace_rq - Add a trace for a request oriented action
 * @rq:		the source request
 * @error:	return status to log
 * @nr_bytes:	number of completed bytes
 * @what:	the action
 * @cgid:	the cgroup info
 *
 * Description:
 *     Records an action against a request. Will log the bio offset + size.
 *
 **/
static void blk_add_trace_rq(struct request *rq, int error,
			     unsigned int nr_bytes, u32 what, u64 cgid)
{
	struct blk_trace *bt;

	rcu_read_lock();
	bt = rcu_dereference(rq->q->blk_trace);
	if (likely(!bt)) {
		rcu_read_unlock();
		return;
	}

	if (blk_rq_is_passthrough(rq))
		what |= BLK_TC_ACT(BLK_TC_PC);
	else
		what |= BLK_TC_ACT(BLK_TC_FS);

	__blk_add_trace(bt, blk_rq_trace_sector(rq), nr_bytes, req_op(rq),
			rq->cmd_flags, what, error, 0, NULL, cgid);
	rcu_read_unlock();
}

static void blk_add_trace_rq_insert(void *ignore, struct request *rq)
{
	blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_INSERT,
			 blk_trace_request_get_cgid(rq));
}

static void blk_add_trace_rq_issue(void *ignore, struct request *rq)
{
	blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_ISSUE,
			 blk_trace_request_get_cgid(rq));
}

static void blk_add_trace_rq_merge(void *ignore, struct request *rq)
{
	blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_BACKMERGE,
			 blk_trace_request_get_cgid(rq));
}

static void blk_add_trace_rq_requeue(void *ignore, struct request *rq)
{
	blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_REQUEUE,
			 blk_trace_request_get_cgid(rq));
}

static void blk_add_trace_rq_complete(void *ignore, struct request *rq,
				      int error, unsigned int nr_bytes)
{
	blk_add_trace_rq(rq, error, nr_bytes, BLK_TA_COMPLETE,
			 blk_trace_request_get_cgid(rq));
}

/**
 * blk_add_trace_bio - Add a trace for a bio oriented action
 * @q:		queue the io is for
 * @bio:	the source bio
 * @what:	the action
 * @error:	error, if any
 *
 * Description:
 *     Records an action against a bio. Will log the bio offset + size.
 *
 **/
static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
			      u32 what, int error)
{
	struct blk_trace *bt;

	rcu_read_lock();
	bt = rcu_dereference(q->blk_trace);
	if (likely(!bt)) {
		rcu_read_unlock();
		return;
	}

	__blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
			bio_op(bio), bio->bi_opf, what, error, 0, NULL,
			blk_trace_bio_get_cgid(q, bio));
	rcu_read_unlock();
}

static void blk_add_trace_bio_bounce(void *ignore, struct bio *bio)
{
	blk_add_trace_bio(bio->bi_bdev->bd_disk->queue, bio, BLK_TA_BOUNCE, 0);
}

static void blk_add_trace_bio_complete(void *ignore,
				       struct request_queue *q, struct bio *bio)
{
	blk_add_trace_bio(q, bio, BLK_TA_COMPLETE,
			  blk_status_to_errno(bio->bi_status));
}

static void blk_add_trace_bio_backmerge(void *ignore, struct bio *bio)
{
	blk_add_trace_bio(bio->bi_bdev->bd_disk->queue, bio, BLK_TA_BACKMERGE,
			  0);
}

static void blk_add_trace_bio_frontmerge(void *ignore, struct bio *bio)
{
	blk_add_trace_bio(bio->bi_bdev->bd_disk->queue, bio, BLK_TA_FRONTMERGE,
			  0);
}

static void blk_add_trace_bio_queue(void *ignore, struct bio *bio)
{
	blk_add_trace_bio(bio->bi_bdev->bd_disk->queue, bio, BLK_TA_QUEUE, 0);
}

static void blk_add_trace_getrq(void *ignore, struct bio *bio)
{
	blk_add_trace_bio(bio->bi_bdev->bd_disk->queue, bio, BLK_TA_GETRQ, 0);
}

static void blk_add_trace_plug(void *ignore, struct request_queue *q)
{
	struct blk_trace *bt;

	rcu_read_lock();
	bt = rcu_dereference(q->blk_trace);
	if (bt)
		__blk_add_trace(bt, 0, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL, 0);
	rcu_read_unlock();
}

static void blk_add_trace_unplug(void *ignore, struct request_queue *q,
				    unsigned int depth, bool explicit)
{
	struct blk_trace *bt;

	rcu_read_lock();
	bt = rcu_dereference(q->blk_trace);
	if (bt) {
		__be64 rpdu = cpu_to_be64(depth);
		u32 what;

		if (explicit)
			what = BLK_TA_UNPLUG_IO;
		else
			what = BLK_TA_UNPLUG_TIMER;

		__blk_add_trace(bt, 0, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu, 0);
	}
	rcu_read_unlock();
}

static void blk_add_trace_split(void *ignore, struct bio *bio, unsigned int pdu)
{
	struct request_queue *q = bio->bi_bdev->bd_disk->queue;
	struct blk_trace *bt;

	rcu_read_lock();
	bt = rcu_dereference(q->blk_trace);
	if (bt) {
		__be64 rpdu = cpu_to_be64(pdu);

		__blk_add_trace(bt, bio->bi_iter.bi_sector,
				bio->bi_iter.bi_size, bio_op(bio), bio->bi_opf,
				BLK_TA_SPLIT,
				blk_status_to_errno(bio->bi_status),
				sizeof(rpdu), &rpdu,
				blk_trace_bio_get_cgid(q, bio));
	}
	rcu_read_unlock();
}

/**
 * blk_add_trace_bio_remap - Add a trace for a bio-remap operation
 * @ignore:	trace callback data parameter (not used)
 * @bio:	the source bio
 * @dev:	source device
 * @from:	source sector
 *
 * Called after a bio is remapped to a different device and/or sector.
 **/
static void blk_add_trace_bio_remap(void *ignore, struct bio *bio, dev_t dev,
				    sector_t from)
{
	struct request_queue *q = bio->bi_bdev->bd_disk->queue;
	struct blk_trace *bt;
	struct blk_io_trace_remap r;

	rcu_read_lock();
	bt = rcu_dereference(q->blk_trace);
	if (likely(!bt)) {
		rcu_read_unlock();
		return;
	}

	r.device_from = cpu_to_be32(dev);
	r.device_to   = cpu_to_be32(bio_dev(bio));
	r.sector_from = cpu_to_be64(from);

	__blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
			bio_op(bio), bio->bi_opf, BLK_TA_REMAP,
			blk_status_to_errno(bio->bi_status),
			sizeof(r), &r, blk_trace_bio_get_cgid(q, bio));
	rcu_read_unlock();
}

/**
 * blk_add_trace_rq_remap - Add a trace for a request-remap operation
 * @ignore:	trace callback data parameter (not used)
 * @rq:		the source request
 * @dev:	target device
 * @from:	source sector
 *
 * Description:
 *     Device mapper remaps request to other devices.
 *     Add a trace for that action.
 *
 **/
static void blk_add_trace_rq_remap(void *ignore, struct request *rq, dev_t dev,
				   sector_t from)
{
	struct blk_trace *bt;
	struct blk_io_trace_remap r;

	rcu_read_lock();
	bt = rcu_dereference(rq->q->blk_trace);
	if (likely(!bt)) {
		rcu_read_unlock();
		return;
	}

	r.device_from = cpu_to_be32(dev);
	r.device_to   = cpu_to_be32(disk_devt(rq->rq_disk));
	r.sector_from = cpu_to_be64(from);

	__blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq),
			rq_data_dir(rq), 0, BLK_TA_REMAP, 0,
			sizeof(r), &r, blk_trace_request_get_cgid(rq));
	rcu_read_unlock();
}

/**
 * blk_add_driver_data - Add binary message with driver-specific data
 * @rq:		io request
 * @data:	driver-specific data
 * @len:	length of driver-specific data
 *
 * Description:
 *     Some drivers might want to write driver-specific data per request.
 *
 **/
void blk_add_driver_data(struct request *rq, void *data, size_t len)
{
	struct blk_trace *bt;

	rcu_read_lock();
	bt = rcu_dereference(rq->q->blk_trace);
	if (likely(!bt)) {
		rcu_read_unlock();
		return;
	}

	__blk_add_trace(bt, blk_rq_trace_sector(rq), blk_rq_bytes(rq), 0, 0,
				BLK_TA_DRV_DATA, 0, len, data,
				blk_trace_request_get_cgid(rq));
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(blk_add_driver_data);

static void blk_register_tracepoints(void)
{
	int ret;

	ret = register_trace_block_rq_insert(blk_add_trace_rq_insert, NULL);
	WARN_ON(ret);
	ret = register_trace_block_rq_issue(blk_add_trace_rq_issue, NULL);
	WARN_ON(ret);
	ret = register_trace_block_rq_merge(blk_add_trace_rq_merge, NULL);
	WARN_ON(ret);
	ret = register_trace_block_rq_requeue(blk_add_trace_rq_requeue, NULL);
	WARN_ON(ret);
	ret = register_trace_block_rq_complete(blk_add_trace_rq_complete, NULL);
	WARN_ON(ret);
	ret = register_trace_block_bio_bounce(blk_add_trace_bio_bounce, NULL);
	WARN_ON(ret);
	ret = register_trace_block_bio_complete(blk_add_trace_bio_complete, NULL);
	WARN_ON(ret);
	ret = register_trace_block_bio_backmerge(blk_add_trace_bio_backmerge, NULL);
	WARN_ON(ret);
	ret = register_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge, NULL);
	WARN_ON(ret);
	ret = register_trace_block_bio_queue(blk_add_trace_bio_queue, NULL);
	WARN_ON(ret);
	ret = register_trace_block_getrq(blk_add_trace_getrq, NULL);
	WARN_ON(ret);
	ret = register_trace_block_plug(blk_add_trace_plug, NULL);
	WARN_ON(ret);
	ret = register_trace_block_unplug(blk_add_trace_unplug, NULL);
	WARN_ON(ret);
	ret = register_trace_block_split(blk_add_trace_split, NULL);
	WARN_ON(ret);
	ret = register_trace_block_bio_remap(blk_add_trace_bio_remap, NULL);
	WARN_ON(ret);
	ret = register_trace_block_rq_remap(blk_add_trace_rq_remap, NULL);
	WARN_ON(ret);
}

static void blk_unregister_tracepoints(void)
{
	unregister_trace_block_rq_remap(blk_add_trace_rq_remap, NULL);
	unregister_trace_block_bio_remap(blk_add_trace_bio_remap, NULL);
	unregister_trace_block_split(blk_add_trace_split, NULL);
	unregister_trace_block_unplug(blk_add_trace_unplug, NULL);
	unregister_trace_block_plug(blk_add_trace_plug, NULL);
	unregister_trace_block_getrq(blk_add_trace_getrq, NULL);
	unregister_trace_block_bio_queue(blk_add_trace_bio_queue, NULL);
	unregister_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge, NULL);
	unregister_trace_block_bio_backmerge(blk_add_trace_bio_backmerge, NULL);
	unregister_trace_block_bio_complete(blk_add_trace_bio_complete, NULL);
	unregister_trace_block_bio_bounce(blk_add_trace_bio_bounce, NULL);
	unregister_trace_block_rq_complete(blk_add_trace_rq_complete, NULL);
	unregister_trace_block_rq_requeue(blk_add_trace_rq_requeue, NULL);
	unregister_trace_block_rq_merge(blk_add_trace_rq_merge, NULL);
	unregister_trace_block_rq_issue(blk_add_trace_rq_issue, NULL);
	unregister_trace_block_rq_insert(blk_add_trace_rq_insert, NULL);
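
	/* Wait for any in-flight probe calls to finish before state is torn down. */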
	tracepoint_synchronize_unregister();
}

/*
 * struct blk_io_tracer formatting routines
 */

static void fill_rwbs(char *rwbs, const struct blk_io_trace *t)
{
	int i = 0;
	int tc = t->action >> BLK_TC_SHIFT;

	if ((t->action & ~__BLK_TN_CGROUP) == BLK_TN_MESSAGE) {
		rwbs[i++] = 'N';
		goto out;
	}

	if (tc & BLK_TC_FLUSH)
		rwbs[i++] = 'F';

	if (tc & BLK_TC_DISCARD)
		rwbs[i++] = 'D';
	else if (tc & BLK_TC_WRITE)
		rwbs[i++] = 'W';
	else if (t->bytes)
		rwbs[i++] = 'R';
	else
		rwbs[i++] = 'N';

	if (tc & BLK_TC_FUA)
		rwbs[i++] = 'F';
	if (tc & BLK_TC_AHEAD)
		rwbs[i++] = 'A';
	if (tc & BLK_TC_SYNC)
		rwbs[i++] = 'S';
	if (tc & BLK_TC_META)
		rwbs[i++] = 'M';
out:
	rwbs[i] = '\0';
}

static inline
const struct blk_io_trace *te_blk_io_trace(const struct trace_entry *ent)
{
	return (const struct blk_io_trace *)ent;
}

static inline const void *pdu_start(const struct trace_entry *ent, bool has_cg)
{
	return (void *)(te_blk_io_trace(ent) + 1) + (has_cg ? sizeof(u64) : 0);
}

static inline u64 t_cgid(const struct trace_entry *ent)
{
	return *(u64 *)(te_blk_io_trace(ent) + 1);
}

static inline int pdu_real_len(const struct trace_entry *ent, bool has_cg)
{
	return te_blk_io_trace(ent)->pdu_len - (has_cg ? sizeof(u64) : 0);
}

static inline u32 t_action(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->action;
}

static inline u32 t_bytes(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->bytes;
}

static inline u32 t_sec(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->bytes >> 9;	/* bytes -> 512B sectors */
}

static inline unsigned long long t_sector(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->sector;
}

static inline __u16 t_error(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->error;
}

static __u64 get_pdu_int(const struct trace_entry *ent, bool has_cg)
{
	const __be64 *val = pdu_start(ent, has_cg);
	return be64_to_cpu(*val);
}

typedef void (blk_log_action_t) (struct trace_iterator *iter, const char *act,
	bool has_cg);

static void blk_log_action_classic(struct trace_iterator *iter, const char *act,
	bool has_cg)
{
	char rwbs[RWBS_LEN];
	unsigned long long ts  = iter->ts;
	unsigned long nsec_rem = do_div(ts, NSEC_PER_SEC);
	unsigned secs	       = (unsigned long)ts;
	const struct blk_io_trace *t = te_blk_io_trace(iter->ent);

	fill_rwbs(rwbs, t);

	trace_seq_printf(&iter->seq,
			 "%3d,%-3d %2d %5d.%09lu %5u %2s %3s ",
			 MAJOR(t->device), MINOR(t->device), iter->cpu,
			 secs, nsec_rem, iter->ent->pid, act, rwbs);
}

static void blk_log_action(struct trace_iterator *iter, const char *act,
	bool has_cg)
{
	char rwbs[RWBS_LEN];
	const struct blk_io_trace *t = te_blk_io_trace(iter->ent);

	fill_rwbs(rwbs, t);
	if (has_cg) {
		u64 id = t_cgid(iter->ent);

		if (blk_tracer_flags.val & TRACE_BLK_OPT_CGNAME) {
			char blkcg_name_buf[NAME_MAX + 1] = "<...>";

			cgroup_path_from_kernfs_id(id, blkcg_name_buf,
				sizeof(blkcg_name_buf));
			trace_seq_printf(&iter->seq, "%3d,%-3d %s %2s %3s ",
				 MAJOR(t->device), MINOR(t->device),
				 blkcg_name_buf, act, rwbs);
		} else {
			/*
			 * The cgid portion used to be "INO,GEN".  Userland
			 * builds a FILEID_INO32_GEN fid out of them and
			 * opens the cgroup using open_by_handle_at(2).
			 * While 32bit ino setups are still the same, 64bit
			 * ones now use the 64bit ino as the whole ID and
			 * no longer use generation.
			 *
			 * Regardless of the content, always output
			 * "LOW32,HIGH32" so that FILEID_INO32_GEN fid can
			 * be mapped back to @id on both 64 and 32bit ino
			 * setups.  See __kernfs_fh_to_dentry().
			 */
			trace_seq_printf(&iter->seq,
				 "%3d,%-3d %llx,%-llx %2s %3s ",
				 MAJOR(t->device), MINOR(t->device),
				 id & U32_MAX, id >> 32, act, rwbs);
		}
	} else
		trace_seq_printf(&iter->seq, "%3d,%-3d %2s %3s ",
				 MAJOR(t->device), MINOR(t->device), act, rwbs);
}

static void blk_log_dump_pdu(struct trace_seq *s,
	const struct trace_entry *ent, bool has_cg)
{
	const unsigned char *pdu_buf;
	int pdu_len;
	int i, end;

	pdu_buf = pdu_start(ent, has_cg);
	pdu_len = pdu_real_len(ent, has_cg);

	if (!pdu_len)
		return;

	/* find the last zero that needs to be printed */
	for (end = pdu_len - 1; end >= 0; end--)
		if (pdu_buf[end])
			break;
	end++;

	trace_seq_putc(s, '(');

	for (i = 0; i < pdu_len; i++) {

		trace_seq_printf(s, "%s%02x",
				 i == 0 ? "" : " ", pdu_buf[i]);

		/*
		 * stop when the rest is just zeros and indicate so
		 * with a ".." appended
		 */
		if (i == end && end != pdu_len - 1) {
			trace_seq_puts(s, " ..) ");
			return;
		}
	}

	trace_seq_puts(s, ") ");
}

static void blk_log_generic(struct trace_seq *s, const struct trace_entry *ent, bool has_cg)
{
	char cmd[TASK_COMM_LEN];

	trace_find_cmdline(ent->pid, cmd);

	if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) {
		trace_seq_printf(s, "%u ", t_bytes(ent));
		blk_log_dump_pdu(s, ent, has_cg);
		trace_seq_printf(s, "[%s]\n", cmd);
	} else {
		if (t_sec(ent))
			trace_seq_printf(s, "%llu + %u [%s]\n",
						t_sector(ent), t_sec(ent), cmd);
		else
			trace_seq_printf(s, "[%s]\n", cmd);
	}
}

static void blk_log_with_error(struct trace_seq *s,
			       const struct trace_entry *ent, bool has_cg)
{
	if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) {
		blk_log_dump_pdu(s, ent, has_cg);
		trace_seq_printf(s, "[%d]\n", t_error(ent));
	} else {
		if (t_sec(ent))
			trace_seq_printf(s, "%llu + %u [%d]\n",
					 t_sector(ent),
					 t_sec(ent), t_error(ent));
		else
			trace_seq_printf(s, "%llu [%d]\n",
					 t_sector(ent), t_error(ent));
	}
}

static void blk_log_remap(struct trace_seq *s, const struct trace_entry *ent, bool has_cg)
{
	const struct blk_io_trace_remap *__r = pdu_start(ent, has_cg);

	trace_seq_printf(s, "%llu + %u <- (%d,%d) %llu\n",
			 t_sector(ent), t_sec(ent),
			 MAJOR(be32_to_cpu(__r->device_from)),
			 MINOR(be32_to_cpu(__r->device_from)),
			 be64_to_cpu(__r->sector_from));
}

static void blk_log_plug(struct trace_seq *s, const struct trace_entry *ent, bool has_cg)
{
	char cmd[TASK_COMM_LEN];

	trace_find_cmdline(ent->pid, cmd);

	trace_seq_printf(s, "[%s]\n", cmd);
}

static void blk_log_unplug(struct trace_seq *s, const struct trace_entry *ent, bool has_cg)
{
	char cmd[TASK_COMM_LEN];

	trace_find_cmdline(ent->pid, cmd);

	trace_seq_printf(s, "[%s] %llu\n", cmd, get_pdu_int(ent, has_cg));
}

static void blk_log_split(struct trace_seq *s, const struct trace_entry *ent, bool has_cg)
{
	char cmd[TASK_COMM_LEN];

	trace_find_cmdline(ent->pid, cmd);

	trace_seq_printf(s, "%llu / %llu [%s]\n", t_sector(ent),
			 get_pdu_int(ent, has_cg), cmd);
}

static void blk_log_msg(struct trace_seq *s, const struct trace_entry *ent,
			bool has_cg)
{
	trace_seq_putmem(s, pdu_start(ent, has_cg),
		pdu_real_len(ent, has_cg));
	trace_seq_putc(s, '\n');
}

/*
 * struct tracer operations
 */

static void blk_tracer_print_header(struct seq_file *m)
{
	if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
		return;
	seq_puts(m, "# DEV   CPU TIMESTAMP     PID ACT FLG\n"
		    "#  |     |     |           |   |   |\n");
}

static void blk_tracer_start(struct trace_array *tr)
{
	blk_tracer_enabled = true;
}

static int blk_tracer_init(struct trace_array *tr)
{
	blk_tr = tr;
	blk_tracer_start(tr);
	return 0;
}

static void blk_tracer_stop(struct trace_array *tr)
{
	blk_tracer_enabled = false;
}

static void blk_tracer_reset(struct trace_array *tr)
{
	blk_tracer_stop(tr);
}

static const struct {
	const char *act[2];
	void	   (*print)(struct trace_seq *s, const struct trace_entry *ent,
			    bool has_cg);
} what2act[] = {
	[__BLK_TA_QUEUE]	= {{ "Q", "queue" },	  blk_log_generic },
	[__BLK_TA_BACKMERGE]	= {{ "M", "backmerge" },  blk_log_generic },
	[__BLK_TA_FRONTMERGE]	= {{ "F", "frontmerge" }, blk_log_generic },
	[__BLK_TA_GETRQ]	= {{ "G", "getrq" },	  blk_log_generic },
	[__BLK_TA_SLEEPRQ]	= {{ "S", "sleeprq" },	  blk_log_generic },
	[__BLK_TA_REQUEUE]	= {{ "R", "requeue" },	  blk_log_with_error },
	[__BLK_TA_ISSUE]	= {{ "D", "issue" },	  blk_log_generic },
	[__BLK_TA_COMPLETE]	= {{ "C", "complete" },	  blk_log_with_error },
	[__BLK_TA_PLUG]		= {{ "P", "plug" },	  blk_log_plug },
	[__BLK_TA_UNPLUG_IO]	= {{ "U", "unplug_io" },  blk_log_unplug },
	[__BLK_TA_UNPLUG_TIMER]	= {{ "UT", "unplug_timer" }, blk_log_unplug },
	[__BLK_TA_INSERT]	= {{ "I", "insert" },	  blk_log_generic },
	[__BLK_TA_SPLIT]	= {{ "X", "split" },	  blk_log_split },
	[__BLK_TA_BOUNCE]	= {{ "B", "bounce" },	  blk_log_generic },
	[__BLK_TA_REMAP]	= {{ "A", "remap" },	  blk_log_remap },
};

static enum print_line_t print_one_line(struct trace_iterator *iter,
					bool classic)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	const struct blk_io_trace *t;
	u16 what;
	bool long_act;
	blk_log_action_t *log_action;
	bool has_cg;

	t	   = te_blk_io_trace(iter->ent);
	what	   = (t->action & ((1 << BLK_TC_SHIFT) - 1)) & ~__BLK_TA_CGROUP;
	long_act   = !!(tr->trace_flags & TRACE_ITER_VERBOSE);
	log_action = classic ? &blk_log_action_classic : &blk_log_action;
	has_cg	   = t->action & __BLK_TA_CGROUP;

	if ((t->action & ~__BLK_TN_CGROUP) == BLK_TN_MESSAGE) {
		log_action(iter, long_act ? "message" : "m", has_cg);
		blk_log_msg(s, iter->ent, has_cg);
		return trace_handle_return(s);
	}

	if (unlikely(what == 0 || what >= ARRAY_SIZE(what2act)))
		trace_seq_printf(s, "Unknown action %x\n", what);
	else {
		log_action(iter, what2act[what].act[long_act], has_cg);
		what2act[what].print(s, iter->ent, has_cg);
	}

	return trace_handle_return(s);
}

static enum print_line_t blk_trace_event_print(struct trace_iterator *iter,
					       int flags, struct trace_event *event)
{
	return print_one_line(iter, false);
}

static void blk_trace_synthesize_old_trace(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct blk_io_trace *t = (struct blk_io_trace *)iter->ent;
	const int offset = offsetof(struct blk_io_trace, sector);
	struct blk_io_trace old = {
		.magic	  = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION,
		.time     = iter->ts,
	};

	trace_seq_putmem(s, &old, offset);
	trace_seq_putmem(s, &t->sector,
			 sizeof(old) - offset + t->pdu_len);
}

static enum print_line_t
blk_trace_event_print_binary(struct trace_iterator *iter, int flags,
			     struct trace_event *event)
{
	blk_trace_synthesize_old_trace(iter);

	return trace_handle_return(&iter->seq);
}

static enum print_line_t blk_tracer_print_line(struct trace_iterator *iter)
{
	if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
		return TRACE_TYPE_UNHANDLED;

	return print_one_line(iter, true);
}

static int
blk_tracer_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	/* don't output context-info for blk_classic output */
	if (bit == TRACE_BLK_OPT_CLASSIC) {
		if (set)
			tr->trace_flags &= ~TRACE_ITER_CONTEXT_INFO;
		else
			tr->trace_flags |= TRACE_ITER_CONTEXT_INFO;
	}
	return 0;
}

static struct tracer blk_tracer __read_mostly = {
	.name		= "blk",
	.init		= blk_tracer_init,
	.reset		= blk_tracer_reset,
	.start		= blk_tracer_start,
	.stop		= blk_tracer_stop,
	.print_header	= blk_tracer_print_header,
	.print_line	= blk_tracer_print_line,
	.flags		= &blk_tracer_flags,
	.set_flag	= blk_tracer_set_flag,
};

static struct trace_event_functions trace_blk_event_funcs = {
	.trace		= blk_trace_event_print,
	.binary		= blk_trace_event_print_binary,
};

static struct trace_event trace_blk_event = {
	.type		= TRACE_BLK,
	.funcs		= &trace_blk_event_funcs,
};

static int __init init_blk_tracer(void)
{
	if (!register_trace_event(&trace_blk_event)) {
		pr_warn("Warning: could not register block events\n");
		return 1;
	}

	if (register_tracer(&blk_tracer) != 0) {
		pr_warn("Warning: could not register the block tracer\n");
		unregister_trace_event(&trace_blk_event);
		return 1;
	}

	return 0;
}

device_initcall(init_blk_tracer);

static int blk_trace_remove_queue(struct request_queue *q)
{
	struct blk_trace *bt;

	bt = rcu_replace_pointer(q->blk_trace, NULL,
				 lockdep_is_held(&q->debugfs_mutex));
	if (bt == NULL)
		return -EINVAL;

	put_probe_ref();
	synchronize_rcu();
	blk_trace_free(bt);
	return 0;
}

/*
 * Setup everything required to start tracing
 */
static int blk_trace_setup_queue(struct request_queue *q,
				 struct block_device *bdev)
{
	struct blk_trace *bt = NULL;
	int ret = -ENOMEM;

	bt = kzalloc(sizeof(*bt), GFP_KERNEL);
	if (!bt)
		return -ENOMEM;

	bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char));
	if (!bt->msg_data)
		goto free_bt;

	bt->dev = bdev->bd_dev;
	bt->act_mask = (u16)-1;

	blk_trace_setup_lba(bt, bdev);

	rcu_assign_pointer(q->blk_trace, bt);
	get_probe_ref();
	return 0;

free_bt:
	blk_trace_free(bt);
	return ret;
}

/*
 * sysfs interface to enable and configure tracing
 */

static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf);
static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count);
#define BLK_TRACE_DEVICE_ATTR(_name) \
	DEVICE_ATTR(_name, S_IRUGO | S_IWUSR, \
		    sysfs_blk_trace_attr_show, \
		    sysfs_blk_trace_attr_store)

static BLK_TRACE_DEVICE_ATTR(enable);
static BLK_TRACE_DEVICE_ATTR(act_mask);
static BLK_TRACE_DEVICE_ATTR(pid);
static BLK_TRACE_DEVICE_ATTR(start_lba);
static BLK_TRACE_DEVICE_ATTR(end_lba);

static struct attribute *blk_trace_attrs[] = {
	&dev_attr_enable.attr,
	&dev_attr_act_mask.attr,
	&dev_attr_pid.attr,
	&dev_attr_start_lba.attr,
	&dev_attr_end_lba.attr,
	NULL
};

struct attribute_group blk_trace_attr_group = {
	.name  = "trace",
	.attrs = blk_trace_attrs,
};

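/*
 * Example (added; "sda" is an assumed device name): the group above appears
 * as /sys/block/sda/trace/, so tracing can be configured without the ioctls:
 *
 *	echo read,write > /sys/block/sda/trace/act_mask
 *	echo 1 > /sys/block/sda/trace/enable
 */
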
static const struct {
	int mask;
	const char *str;
} mask_maps[] = {
	{ BLK_TC_READ,		"read"		},
	{ BLK_TC_WRITE,		"write"		},
	{ BLK_TC_FLUSH,		"flush"		},
	{ BLK_TC_SYNC,		"sync"		},
	{ BLK_TC_QUEUE,		"queue"		},
	{ BLK_TC_REQUEUE,	"requeue"	},
	{ BLK_TC_ISSUE,		"issue"		},
	{ BLK_TC_COMPLETE,	"complete"	},
	{ BLK_TC_FS,		"fs"		},
	{ BLK_TC_PC,		"pc"		},
	{ BLK_TC_NOTIFY,	"notify"	},
	{ BLK_TC_AHEAD,		"ahead"		},
	{ BLK_TC_META,		"meta"		},
	{ BLK_TC_DISCARD,	"discard"	},
	{ BLK_TC_DRV_DATA,	"drv_data"	},
	{ BLK_TC_FUA,		"fua"		},
};

static int blk_trace_str2mask(const char *str)
{
	int i;
	int mask = 0;
	char *buf, *s, *token;

	buf = kstrdup(str, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;
	s = strstrip(buf);

	while (1) {
		token = strsep(&s, ",");
		if (token == NULL)
			break;

		if (*token == '\0')
			continue;

		for (i = 0; i < ARRAY_SIZE(mask_maps); i++) {
			if (strcasecmp(token, mask_maps[i].str) == 0) {
				mask |= mask_maps[i].mask;
				break;
			}
		}

		if (i == ARRAY_SIZE(mask_maps)) {
			mask = -EINVAL;
			break;
		}
	}
	kfree(buf);

	return mask;
}

static ssize_t blk_trace_mask2str(char *buf, int mask)
{
	int i;
	char *p = buf;

	for (i = 0; i < ARRAY_SIZE(mask_maps); i++) {
		if (mask & mask_maps[i].mask) {
			p += sprintf(p, "%s%s",
				    (p == buf) ? "" : ",", mask_maps[i].str);
		}
	}
	*p++ = '\n';

	return p - buf;
}

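/*
 * Example (added): a mask of BLK_TC_READ | BLK_TC_WRITE | BLK_TC_FUA
 * round-trips through the two helpers above as the string "read,write,fua".
 */
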
static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct block_device *bdev = dev_to_bdev(dev);
	struct request_queue *q = bdev_get_queue(bdev);
	struct blk_trace *bt;
	ssize_t ret = -ENXIO;

	mutex_lock(&q->debugfs_mutex);

	bt = rcu_dereference_protected(q->blk_trace,
				       lockdep_is_held(&q->debugfs_mutex));
	if (attr == &dev_attr_enable) {
		ret = sprintf(buf, "%u\n", !!bt);
		goto out_unlock_bdev;
	}

	if (bt == NULL)
		ret = sprintf(buf, "disabled\n");
	else if (attr == &dev_attr_act_mask)
		ret = blk_trace_mask2str(buf, bt->act_mask);
	else if (attr == &dev_attr_pid)
		ret = sprintf(buf, "%u\n", bt->pid);
	else if (attr == &dev_attr_start_lba)
		ret = sprintf(buf, "%llu\n", bt->start_lba);
	else if (attr == &dev_attr_end_lba)
		ret = sprintf(buf, "%llu\n", bt->end_lba);

out_unlock_bdev:
	mutex_unlock(&q->debugfs_mutex);

	return ret;
}

static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct block_device *bdev = dev_to_bdev(dev);
	struct request_queue *q = bdev_get_queue(bdev);
	struct blk_trace *bt;
	u64 value;
	ssize_t ret = -EINVAL;

	if (count == 0)
		goto out;

	if (attr == &dev_attr_act_mask) {
		if (kstrtoull(buf, 0, &value)) {
			/* Assume it is a list of trace category names */
			ret = blk_trace_str2mask(buf);
			if (ret < 0)
				goto out;
			value = ret;
		}
	} else {
		if (kstrtoull(buf, 0, &value))
			goto out;
	}

	mutex_lock(&q->debugfs_mutex);

	bt = rcu_dereference_protected(q->blk_trace,
				       lockdep_is_held(&q->debugfs_mutex));
	if (attr == &dev_attr_enable) {
		if (!!value == !!bt) {
			ret = 0;
			goto out_unlock_bdev;
		}
		if (value)
			ret = blk_trace_setup_queue(q, bdev);
		else
			ret = blk_trace_remove_queue(q);
		goto out_unlock_bdev;
	}

	ret = 0;
	if (bt == NULL) {
		ret = blk_trace_setup_queue(q, bdev);
		bt = rcu_dereference_protected(q->blk_trace,
				lockdep_is_held(&q->debugfs_mutex));
	}

	if (ret == 0) {
		if (attr == &dev_attr_act_mask)
			bt->act_mask = value;
		else if (attr == &dev_attr_pid)
			bt->pid = value;
		else if (attr == &dev_attr_start_lba)
			bt->start_lba = value;
		else if (attr == &dev_attr_end_lba)
			bt->end_lba = value;
	}

out_unlock_bdev:
	mutex_unlock(&q->debugfs_mutex);
out:
	return ret ? ret : count;
}

int blk_trace_init_sysfs(struct device *dev)
{
	return sysfs_create_group(&dev->kobj, &blk_trace_attr_group);
}

void blk_trace_remove_sysfs(struct device *dev)
{
	sysfs_remove_group(&dev->kobj, &blk_trace_attr_group);
}

#endif /* CONFIG_BLK_DEV_IO_TRACE */

#ifdef CONFIG_EVENT_TRACING

/**
 * blk_fill_rwbs - Fill the buffer rwbs by mapping op to character string.
 * @rwbs:	buffer to be filled
 * @op:		REQ_OP_XXX for the tracepoint
 *
 * Description:
 *     Maps the REQ_OP_XXX to character and fills the buffer provided by the
 *     caller with resulting string.
 *
 **/
void blk_fill_rwbs(char *rwbs, unsigned int op)
{
	int i = 0;

	if (op & REQ_PREFLUSH)
		rwbs[i++] = 'F';

	switch (op & REQ_OP_MASK) {
	case REQ_OP_WRITE:
	case REQ_OP_WRITE_SAME:
		rwbs[i++] = 'W';
		break;
	case REQ_OP_DISCARD:
		rwbs[i++] = 'D';
		break;
	case REQ_OP_SECURE_ERASE:
		rwbs[i++] = 'D';
		rwbs[i++] = 'E';
		break;
	case REQ_OP_FLUSH:
		rwbs[i++] = 'F';
		break;
	case REQ_OP_READ:
		rwbs[i++] = 'R';
		break;
	default:
		rwbs[i++] = 'N';
	}

	if (op & REQ_FUA)
		rwbs[i++] = 'F';
	if (op & REQ_RAHEAD)
		rwbs[i++] = 'A';
	if (op & REQ_SYNC)
		rwbs[i++] = 'S';
	if (op & REQ_META)
		rwbs[i++] = 'M';

	rwbs[i] = '\0';
}
EXPORT_SYMBOL_GPL(blk_fill_rwbs);

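/*
 * Example (added): blk_fill_rwbs() turns REQ_OP_WRITE | REQ_SYNC | REQ_FUA
 * into "WFS" - the op letter first ('W'), then the FUA and SYNC modifiers.
 */
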
#endif /* CONFIG_EVENT_TRACING */