Merge tag 'net-next-5.15' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev...
diff --git a/net/sunrpc/xprtrdma/svc_rdma_rw.c b/net/sunrpc/xprtrdma/svc_rdma_rw.c
index 5238bc8..e27433f 100644
@@ -35,6 +35,7 @@ static void svc_rdma_wc_read_done(struct ib_cq *cq, struct ib_wc *wc);
  * controlling svcxprt_rdma is destroyed.
  */
 struct svc_rdma_rw_ctxt {
+       struct llist_node       rw_node;
        struct list_head        rw_list;
        struct rdma_rw_ctx      rw_ctx;
        unsigned int            rw_nents;
@@ -53,19 +54,19 @@ static struct svc_rdma_rw_ctxt *
 svc_rdma_get_rw_ctxt(struct svcxprt_rdma *rdma, unsigned int sges)
 {
        struct svc_rdma_rw_ctxt *ctxt;
+       struct llist_node *node;
 
        spin_lock(&rdma->sc_rw_ctxt_lock);
-
-       ctxt = svc_rdma_next_ctxt(&rdma->sc_rw_ctxts);
-       if (ctxt) {
-               list_del(&ctxt->rw_list);
-               spin_unlock(&rdma->sc_rw_ctxt_lock);
+       node = llist_del_first(&rdma->sc_rw_ctxts);
+       spin_unlock(&rdma->sc_rw_ctxt_lock);
+       if (node) {
+               ctxt = llist_entry(node, struct svc_rdma_rw_ctxt, rw_node);
        } else {
-               spin_unlock(&rdma->sc_rw_ctxt_lock);
                ctxt = kmalloc(struct_size(ctxt, rw_first_sgl, SG_CHUNK_SIZE),
                               GFP_KERNEL);
                if (!ctxt)
                        goto out_noctx;
+
                INIT_LIST_HEAD(&ctxt->rw_list);
        }
 
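The free list becomes an llist here: llist_add() needs no lock at all, but concurrent llist_del_first() callers must serialize among themselves, which is why the get path above keeps sc_rw_ctxt_lock around the pop and nothing else. A minimal sketch of that mixed locking pattern, using illustrative names that are not part of the patch:

#include <linux/llist.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_item {
	struct llist_node node;
	/* payload would live here */
};

static LLIST_HEAD(demo_free_list);
static DEFINE_SPINLOCK(demo_pop_lock);

/* llist_del_first() callers must serialize with one another, so
 * the lock covers only the pop; the fallback allocation runs
 * unlocked.
 */
static struct demo_item *demo_get(void)
{
	struct llist_node *node;

	spin_lock(&demo_pop_lock);
	node = llist_del_first(&demo_free_list);
	spin_unlock(&demo_pop_lock);
	if (node)
		return llist_entry(node, struct demo_item, node);
	return kmalloc(sizeof(struct demo_item), GFP_KERNEL);
}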
@@ -83,14 +84,18 @@ out_noctx:
        return NULL;
 }
 
-static void svc_rdma_put_rw_ctxt(struct svcxprt_rdma *rdma,
-                                struct svc_rdma_rw_ctxt *ctxt)
+static void __svc_rdma_put_rw_ctxt(struct svcxprt_rdma *rdma,
+                                  struct svc_rdma_rw_ctxt *ctxt,
+                                  struct llist_head *list)
 {
        sg_free_table_chained(&ctxt->rw_sg_table, SG_CHUNK_SIZE);
+       llist_add(&ctxt->rw_node, list);
+}
 
-       spin_lock(&rdma->sc_rw_ctxt_lock);
-       list_add(&ctxt->rw_list, &rdma->sc_rw_ctxts);
-       spin_unlock(&rdma->sc_rw_ctxt_lock);
+static void svc_rdma_put_rw_ctxt(struct svcxprt_rdma *rdma,
+                                struct svc_rdma_rw_ctxt *ctxt)
+{
+       __svc_rdma_put_rw_ctxt(rdma, ctxt, &rdma->sc_rw_ctxts);
 }
 
 /**
@@ -101,9 +106,10 @@ static void svc_rdma_put_rw_ctxt(struct svcxprt_rdma *rdma,
 void svc_rdma_destroy_rw_ctxts(struct svcxprt_rdma *rdma)
 {
        struct svc_rdma_rw_ctxt *ctxt;
+       struct llist_node *node;
 
-       while ((ctxt = svc_rdma_next_ctxt(&rdma->sc_rw_ctxts)) != NULL) {
-               list_del(&ctxt->rw_list);
+       while ((node = llist_del_first(&rdma->sc_rw_ctxts)) != NULL) {
+               ctxt = llist_entry(node, struct svc_rdma_rw_ctxt, rw_node);
                kfree(ctxt);
        }
 }
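The put and destroy paths above are the other half of the pattern: releasing an item is one lock-free llist_add(), and teardown may drain with bare llist_del_first() calls because nothing else can reach the transport by then. Continuing the sketch from the previous hunk:

/* Producers can push concurrently without taking any lock. */
static void demo_put(struct demo_item *item)
{
	llist_add(&item->node, &demo_free_list);
}

/* At destroy time the list is no longer shared, so draining it
 * needs no serialization.
 */
static void demo_destroy_all(void)
{
	struct llist_node *node;

	while ((node = llist_del_first(&demo_free_list)) != NULL)
		kfree(llist_entry(node, struct demo_item, node));
}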
@@ -171,20 +177,35 @@ static void svc_rdma_cc_init(struct svcxprt_rdma *rdma,
        cc->cc_sqecount = 0;
 }
 
+/*
+ * The consumed rw_ctx's are cleaned and placed on a local llist so
+ * that only one atomic llist operation is needed to put them all
+ * back on the free list.
+ */
 static void svc_rdma_cc_release(struct svc_rdma_chunk_ctxt *cc,
                                enum dma_data_direction dir)
 {
        struct svcxprt_rdma *rdma = cc->cc_rdma;
+       struct llist_node *first, *last;
        struct svc_rdma_rw_ctxt *ctxt;
+       LLIST_HEAD(free);
 
+       first = last = NULL;
        while ((ctxt = svc_rdma_next_ctxt(&cc->cc_rwctxts)) != NULL) {
                list_del(&ctxt->rw_list);
 
                rdma_rw_ctx_destroy(&ctxt->rw_ctx, rdma->sc_qp,
                                    rdma->sc_port_num, ctxt->rw_sg_table.sgl,
                                    ctxt->rw_nents, dir);
-               svc_rdma_put_rw_ctxt(rdma, ctxt);
+               __svc_rdma_put_rw_ctxt(rdma, ctxt, &free);
+
+               ctxt->rw_node.next = first;
+               first = &ctxt->rw_node;
+               if (!last)
+                       last = first;
        }
+       if (first)
+               llist_add_batch(first, last, &rdma->sc_rw_ctxts);
 }
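Instead of one llist_add() per ctxt, the release loop above chains the nodes by hand (first ends up at the newest node, last remembers the oldest) and then splices the whole chain onto sc_rw_ctxts with a single atomic llist_add_batch(). A sketch of just that batching idiom, again with illustrative names:

#include <linux/llist.h>

/* Publish n locally collected nodes to a shared llist with one
 * atomic operation instead of n separate ones.
 */
static void demo_put_batch(struct llist_head *shared,
			   struct demo_item **items, int n)
{
	struct llist_node *first = NULL, *last = NULL;
	int i;

	for (i = 0; i < n; i++) {
		items[i]->node.next = first;
		first = &items[i]->node;
		if (!last)
			last = first;	/* first node chained is the batch tail */
	}
	if (first)
		llist_add_batch(first, last, shared);
}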
 
 /* State for sending a Write or Reply chunk.
@@ -248,8 +269,7 @@ static void svc_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc)
 
        trace_svcrdma_wc_write(wc, &cc->cc_cid);
 
-       atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
-       wake_up(&rdma->sc_send_wait);
+       svc_rdma_wake_send_waiters(rdma, cc->cc_sqecount);
 
        if (unlikely(wc->status != IB_WC_SUCCESS))
                svc_xprt_deferred_close(&rdma->sc_xprt);
@@ -304,9 +324,7 @@ static void svc_rdma_wc_read_done(struct ib_cq *cq, struct ib_wc *wc)
 
        trace_svcrdma_wc_read(wc, &cc->cc_cid);
 
-       atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
-       wake_up(&rdma->sc_send_wait);
-
+       svc_rdma_wake_send_waiters(rdma, cc->cc_sqecount);
        cc->cc_status = wc->status;
        complete(&cc->cc_done);
        return;
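Both completion handlers now call svc_rdma_wake_send_waiters() instead of open-coding the credit update. Judging only from the lines each hunk removes, the helper presumably looks something like the sketch below; the real definition lives elsewhere in the svcrdma code and may differ in detail:

/* Sketch reconstructed from the removed lines above, not the
 * actual helper.
 */
static void svc_rdma_wake_send_waiters(struct svcxprt_rdma *rdma, int avail)
{
	atomic_add(avail, &rdma->sc_sq_avail);
	wake_up(&rdma->sc_send_wait);
}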
@@ -483,7 +501,7 @@ out_overflow:
  * @iov: kvec to write
  *
  * Returns:
- *   On succes, returns zero
+ *   On success, returns zero
  *   %-E2BIG if the client-provided Write chunk is too small
  *   %-ENOMEM if a resource has been exhausted
  *   %-EIO if an rdma-rw error occurred
@@ -504,7 +522,7 @@ static int svc_rdma_iov_write(struct svc_rdma_write_info *info,
  * @length: number of bytes to write
  *
  * Returns:
- *   On succes, returns zero
+ *   On success, returns zero
  *   %-E2BIG if the client-provided Write chunk is too small
  *   %-ENOMEM if a resource has been exhausted
  *   %-EIO if an rdma-rw error occurred
@@ -526,7 +544,7 @@ static int svc_rdma_pages_write(struct svc_rdma_write_info *info,
  * @data: pointer to write arguments
  *
  * Returns:
- *   On succes, returns zero
+ *   On success, returns zero
  *   %-E2BIG if the client-provided Write chunk is too small
  *   %-ENOMEM if a resource has been exhausted
  *   %-EIO if an rdma-rw error occurred