IB/hfi1: Enable TID RDMA WRITE protocol
Author:     Kaike Wan <kaike.wan@intel.com>
AuthorDate: Thu, 24 Jan 2019 05:51:59 +0000 (21:51 -0800)
Commit:     Doug Ledford <dledford@redhat.com>
CommitDate: Tue, 5 Feb 2019 23:07:44 +0000 (18:07 -0500)
This patch enables the TID RDMA WRITE protocol by converting a qualified
RDMA WRITE request into a TID RDMA WRITE request internally:
(1) The TID RDMA capability must be enabled;
(2) The request must start on a 4K page boundary;
(3) The request length must be a multiple of 4K and at least 256K
    (see the sketch after this list).
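For illustration only, criteria (2) and (3) reduce to a page-mask test plus
a minimum-length test. A minimal standalone sketch, assuming 4K pages and
the 256K minimum stated above; the EXAMPLE_* macros and the helper name are
hypothetical, not the driver's identifiers:

	#include <stdbool.h>
	#include <stdint.h>

	#define EXAMPLE_PAGE_SIZE    4096UL          /* assumed 4K pages */
	#define EXAMPLE_PAGE_MASK    (~(EXAMPLE_PAGE_SIZE - 1))
	#define EXAMPLE_MIN_SEG_SIZE (256UL * 1024)  /* assumed 256K minimum */

	/* Hypothetical helper mirroring criteria (2) and (3). */
	static bool example_write_qualifies(uint64_t remote_addr, uint64_t length)
	{
		return !(remote_addr & ~EXAMPLE_PAGE_MASK) && /* 4K-aligned start   */
		       !(length & ~EXAMPLE_PAGE_MASK) &&      /* 4K-multiple length */
		       length >= EXAMPLE_MIN_SEG_SIZE;        /* at least 256K      */
	}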

Signed-off-by: Mitko Haralanov <mitko.haralanov@intel.com>
Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Signed-off-by: Kaike Wan <kaike.wan@intel.com>
Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
drivers/infiniband/hw/hfi1/tid_rdma.c
drivers/infiniband/hw/hfi1/tid_rdma.h

diff --git a/drivers/infiniband/hw/hfi1/tid_rdma.c b/drivers/infiniband/hw/hfi1/tid_rdma.c
index 2867520..db3188f 100644
@@ -3322,6 +3322,18 @@ void setup_tid_rdma_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe)
                        new_opcode = IB_WR_TID_RDMA_READ;
                        do_tid_rdma = true;
                }
+       } else if (wqe->wr.opcode == IB_WR_RDMA_WRITE) {
+               /*
+                * TID RDMA is enabled for this RDMA WRITE request iff:
+                *   1. The remote address is page-aligned,
+                *   2. The length is at least the minimum segment size,
+                *   3. The length is page-multiple.
+                */
+               if (!(wqe->rdma_wr.remote_addr & ~PAGE_MASK) &&
+                   !(wqe->length & ~PAGE_MASK)) {
+                       new_opcode = IB_WR_TID_RDMA_WRITE;
+                       do_tid_rdma = true;
+               }
        }
 
        if (do_tid_rdma) {
@@ -3338,12 +3350,22 @@ void setup_tid_rdma_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe)
                        priv->tid_req.n_flows = remote->max_read;
                        qpriv->tid_r_reqs++;
                        wqe->lpsn += rvt_div_round_up_mtu(qp, wqe->length) - 1;
+               } else {
+                       wqe->lpsn += priv->tid_req.total_segs - 1;
+                       atomic_inc(&qpriv->n_requests);
                }
 
                priv->tid_req.cur_seg = 0;
                priv->tid_req.comp_seg = 0;
                priv->tid_req.ack_seg = 0;
                priv->tid_req.state = TID_REQUEST_INACTIVE;
+               /*
+                * Reset acked_tail.
+                * TID RDMA READ does not have ACKs, so it does not
+                * update the pointer. We have to reset it so TID RDMA
+                * WRITE does not get confused.
+                */
+               priv->tid_req.acked_tail = priv->tid_req.setup_head;
                trace_hfi1_tid_req_setup_tid_wqe(qp, 1, wqe->wr.opcode,
                                                 wqe->psn, wqe->lpsn,
                                                 &priv->tid_req);
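As context for the acked_tail reset above: a generic sketch of the idea,
using hypothetical fields rather than the driver's actual struct layout.
TID RDMA READ never advances the consumer index, so a following TID RDMA
WRITE must resynchronize it with the producer index:

	/* Hypothetical mirror of the producer/consumer indices involved. */
	struct example_tid_req {
		unsigned int setup_head; /* producer: next segment slot to fill */
		unsigned int acked_tail; /* consumer: oldest unacked segment    */
	};

	/* Mirrors "priv->tid_req.acked_tail = priv->tid_req.setup_head". */
	static void example_reset_for_write(struct example_tid_req *req)
	{
		req->acked_tail = req->setup_head;
	}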
diff --git a/drivers/infiniband/hw/hfi1/tid_rdma.h b/drivers/infiniband/hw/hfi1/tid_rdma.h
index 4446818..53ab24e 100644
@@ -266,7 +266,8 @@ static inline void hfi1_setup_tid_rdma_wqe(struct rvt_qp *qp,
                                           struct rvt_swqe *wqe)
 {
        if (wqe->priv &&
-           wqe->wr.opcode == IB_WR_RDMA_READ &&
+           (wqe->wr.opcode == IB_WR_RDMA_READ ||
+            wqe->wr.opcode == IB_WR_RDMA_WRITE) &&
            wqe->length >= TID_RDMA_MIN_SEGMENT_SIZE)
                setup_tid_rdma_wqe(qp, wqe);
 }
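To see the qualification behave as the commit message describes, a small
usage check reusing the hypothetical example_write_qualifies() helper from
the earlier sketch:

	#include <assert.h>
	#include <stdio.h>

	int main(void)
	{
		assert( example_write_qualifies(0x10000, 256 * 1024));     /* qualifies */
		assert(!example_write_qualifies(0x10800, 256 * 1024));     /* start not 4K-aligned */
		assert(!example_write_qualifies(0x10000, 256 * 1024 + 1)); /* not a 4K multiple */
		assert(!example_write_qualifies(0x10000, 128 * 1024));     /* below the 256K minimum */
		printf("all cases match the commit description\n");
		return 0;
	}

Note the two-level gate in the actual patch: the inline wrapper above
performs the cheap opcode and minimum-length checks for both RDMA READ and
RDMA WRITE, while setup_tid_rdma_wqe() applies the opcode-specific alignment
checks and rewrites the opcode when the request qualifies.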