struct io_ring_ctx *ctx = f->private_data;
        struct io_overflow_cqe *ocqe;
        struct io_rings *r = ctx->rings;
+       struct rusage sq_usage;
        unsigned int sq_mask = ctx->sq_entries - 1, cq_mask = ctx->cq_entries - 1;
        unsigned int sq_head = READ_ONCE(r->sq.head);
        unsigned int sq_tail = READ_ONCE(r->sq.tail);
        unsigned int sq_shift = 0;
        unsigned int sq_entries, cq_entries;
        int sq_pid = -1, sq_cpu = -1;
+       u64 sq_total_time = 0, sq_work_time = 0;
        bool has_lock;
        unsigned int i;
 
 
                sq_pid = sq->task_pid;
                sq_cpu = sq->sq_cpu;
+               getrusage(sq->thread, RUSAGE_SELF, &sq_usage);
+               sq_total_time = sq_usage.ru_stime.tv_sec * 1000000 + sq_usage.ru_stime.tv_usec;
+               sq_work_time = sq->work_time;
        }
 
        seq_printf(m, "SqThread:\t%d\n", sq_pid);
        seq_printf(m, "SqThreadCpu:\t%d\n", sq_cpu);
+       seq_printf(m, "SqTotalTime:\t%llu\n", sq_total_time);
+       seq_printf(m, "SqWorkTime:\t%llu\n", sq_work_time);
        seq_printf(m, "UserFiles:\t%u\n", ctx->nr_user_files);
        for (i = 0; has_lock && i < ctx->nr_user_files; i++) {
                struct file *f = io_file_from_index(&ctx->file_table, i);
 
        return retry_list || !llist_empty(&tctx->task_list);
 }
 
+/*
+ * Fold the SQPOLL thread's system CPU time consumed since @start into
+ * sqd->work_time, in microseconds.  @start must have been captured via
+ * getrusage(current, RUSAGE_SELF, ...) before the work being measured;
+ * callers invoke this from the sqpoll thread itself ('current').
+ */
+static void io_sq_update_worktime(struct io_sq_data *sqd, struct rusage *start)
+{
+       struct rusage end;
+
+       getrusage(current, RUSAGE_SELF, &end);
+       end.ru_stime.tv_sec -= start->ru_stime.tv_sec;
+       end.ru_stime.tv_usec -= start->ru_stime.tv_usec;
+
+       /*
+        * tv_usec may have gone negative above; the sum below still yields
+        * the correct delta because tv_sec carries the borrow
+        * (sec * 1000000 + usec is the true microsecond difference).
+        */
+       sqd->work_time += end.ru_stime.tv_usec + end.ru_stime.tv_sec * 1000000;
+}
+
 static int io_sq_thread(void *data)
 {
        struct llist_node *retry_list = NULL;
        struct io_sq_data *sqd = data;
        struct io_ring_ctx *ctx;
+       struct rusage start;
        unsigned long timeout = 0;
        char buf[TASK_COMM_LEN];
        DEFINE_WAIT(wait);
                }
 
                cap_entries = !list_is_singular(&sqd->ctx_list);
+               getrusage(current, RUSAGE_SELF, &start);
                list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
                        int ret = __io_sq_thread(ctx, cap_entries);
 
                        sqt_spin = true;
 
                if (sqt_spin || !time_after(jiffies, timeout)) {
-                       if (sqt_spin)
+                       if (sqt_spin) {
+                               io_sq_update_worktime(sqd, &start);
                                timeout = jiffies + sqd->sq_thread_idle;
+                       }
                        if (unlikely(need_resched())) {
                                mutex_unlock(&sqd->lock);
                                cond_resched();