io_uring: kbuf: inline io_kbuf_recycle_ring()
authorHao Xu <howeyxu@tencent.com>
Thu, 23 Jun 2022 13:01:26 +0000 (21:01 +0800)
committerJens Axboe <axboe@kernel.dk>
Mon, 25 Jul 2022 00:39:16 +0000 (18:39 -0600)
Make io_kbuf_recycle_ring() inline since it is in the fast path for
provided buffers.

Signed-off-by: Hao Xu <howeyxu@tencent.com>
Link: https://lore.kernel.org/r/20220623130126.179232-1-hao.xu@linux.dev
Signed-off-by: Jens Axboe <axboe@kernel.dk>
io_uring/kbuf.c
io_uring/kbuf.h
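
Why inlining matters here: io_kbuf_recycle_ring() is invoked from the
io_kbuf_recycle() dispatch helper in io_uring/kbuf.h for every
provided-buffer request that has to return its buffer, so an out-of-line
call sits directly on that fast path. A rough caller-side sketch, based on
how the helper looks around this series (for illustration only, not part
of this patch):

    /*
     * Illustrative sketch of the caller in io_uring/kbuf.h around this
     * change -- not part of this patch.
     */
    static inline void io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
    {
            /* Classic (legacy) provided buffers are recycled out of line. */
            if (req->flags & REQ_F_BUFFER_SELECTED)
                    io_kbuf_recycle_legacy(req, issue_flags);
            /* Ring-mapped buffers now hit the static inline fast path. */
            if (req->flags & REQ_F_BUFFER_RING)
                    io_kbuf_recycle_ring(req);
    }

With io_kbuf_recycle_ring() defined as a static inline in the same header,
the ring-buffer branch reduces to a couple of flag tests and stores in the
caller rather than a function call into kbuf.c.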

diff --git a/io_uring/kbuf.c b/io_uring/kbuf.c
index 8bf47e4..5e00f16 100644
@@ -74,34 +74,6 @@ void io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags)
        return;
 }
 
-void io_kbuf_recycle_ring(struct io_kiocb *req)
-{
-       /*
-        * We don't need to recycle for REQ_F_BUFFER_RING, we can just clear
-        * the flag and hence ensure that bl->head doesn't get incremented.
-        * If the tail has already been incremented, hang on to it.
-        * The exception is partial io, that case we should increment bl->head
-        * to monopolize the buffer.
-        */
-       if (req->buf_list) {
-               if (req->flags & REQ_F_PARTIAL_IO) {
-                       /*
-                        * If we end up here, then the io_uring_lock has
-                        * been kept held since we retrieved the buffer.
-                        * For the io-wq case, we already cleared
-                        * req->buf_list when the buffer was retrieved,
-                        * hence it cannot be set here for that case.
-                        */
-                       req->buf_list->head++;
-                       req->buf_list = NULL;
-               } else {
-                       req->buf_index = req->buf_list->bgid;
-                       req->flags &= ~REQ_F_BUFFER_RING;
-               }
-       }
-       return;
-}
-
 unsigned int __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags)
 {
        unsigned int cflags;
diff --git a/io_uring/kbuf.h b/io_uring/kbuf.h
index 721465c..b3e8c6c 100644
@@ -49,7 +49,33 @@ int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
 unsigned int __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags);
 
 void io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags);
-void io_kbuf_recycle_ring(struct io_kiocb *req);
+
+static inline void io_kbuf_recycle_ring(struct io_kiocb *req)
+{
+       /*
+        * We don't need to recycle for REQ_F_BUFFER_RING, we can just clear
+        * the flag and hence ensure that bl->head doesn't get incremented.
+        * If the tail has already been incremented, hang on to it.
+        * The exception is partial io, that case we should increment bl->head
+        * to monopolize the buffer.
+        */
+       if (req->buf_list) {
+               if (req->flags & REQ_F_PARTIAL_IO) {
+                       /*
+                        * If we end up here, then the io_uring_lock has
+                        * been kept held since we retrieved the buffer.
+                        * For the io-wq case, we already cleared
+                        * req->buf_list when the buffer was retrieved,
+                        * hence it cannot be set here for that case.
+                        */
+                       req->buf_list->head++;
+                       req->buf_list = NULL;
+               } else {
+                       req->buf_index = req->buf_list->bgid;
+                       req->flags &= ~REQ_F_BUFFER_RING;
+               }
+       }
+}
 
 static inline bool io_do_buffer_select(struct io_kiocb *req)
 {