RDMA/erdma: Unify byte ordering APIs usage
Author:     Cheng Xu <chengyou@linux.alibaba.com>
AuthorDate: Wed, 22 Mar 2023 09:33:17 +0000 (17:33 +0800)
Commit:     Leon Romanovsky <leon@kernel.org>
CommitDate: Wed, 22 Mar 2023 11:10:46 +0000 (13:10 +0200)
Replace __be32_to_cpu()/__cpu_to_be16() with be32_to_cpu()/cpu_to_be16(),
and use be32_to_cpu_array() to copy and byte-swap the CQE completion data
instead of an open-coded loop.
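
For context, be32_to_cpu_array(dst, src, len) (a kernel helper in
include/linux/byteorder/generic.h) byte-swaps len big-endian 32-bit words
into dst, which is why it can replace the removed loop one-for-one. Below
is a minimal userspace sketch of that behaviour, using ntohl() as a
stand-in for be32_to_cpu(); the demo_* names are illustrative only and are
not part of this patch.

#include <arpa/inet.h>  /* ntohl()/htonl(): big-endian <-> host order */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Userspace model of be32_to_cpu_array(): swap each word into dst. */
static void demo_be32_to_cpu_array(uint32_t *dst, const uint32_t *src,
				   size_t len)
{
	size_t i;

	for (i = 0; i < len; i++)
		dst[i] = ntohl(src[i]);
}

int main(void)
{
	/* Four big-endian words, like the 16 bytes of CQE completion data. */
	uint32_t be_words[4] = { htonl(0x11111111), htonl(0x22222222),
				 htonl(0x33333333), htonl(0x44444444) };
	uint32_t cpu_words[4];

	demo_be32_to_cpu_array(cpu_words, be_words, 4);
	printf("0x%08x 0x%08x 0x%08x 0x%08x\n",
	       cpu_words[0], cpu_words[1], cpu_words[2], cpu_words[3]);
	return 0;
}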

Signed-off-by: Cheng Xu <chengyou@linux.alibaba.com>
Link: https://lore.kernel.org/r/20230322093319.84045-2-chengyou@linux.alibaba.com
Signed-off-by: Leon Romanovsky <leon@kernel.org>
drivers/infiniband/hw/erdma/erdma_cm.h
drivers/infiniband/hw/erdma/erdma_cmdq.c
drivers/infiniband/hw/erdma/erdma_cq.c

diff --git a/drivers/infiniband/hw/erdma/erdma_cm.h b/drivers/infiniband/hw/erdma/erdma_cm.h
index 8a3f998..a26d807 100644
--- a/drivers/infiniband/hw/erdma/erdma_cm.h
+++ b/drivers/infiniband/hw/erdma/erdma_cm.h
@@ -33,11 +33,11 @@ struct mpa_rr_params {
  * MPA request/response Hdr bits & fields
  */
 enum {
-       MPA_RR_FLAG_MARKERS = __cpu_to_be16(0x8000),
-       MPA_RR_FLAG_CRC = __cpu_to_be16(0x4000),
-       MPA_RR_FLAG_REJECT = __cpu_to_be16(0x2000),
-       MPA_RR_RESERVED = __cpu_to_be16(0x1f00),
-       MPA_RR_MASK_REVISION = __cpu_to_be16(0x00ff)
+       MPA_RR_FLAG_MARKERS = cpu_to_be16(0x8000),
+       MPA_RR_FLAG_CRC = cpu_to_be16(0x4000),
+       MPA_RR_FLAG_REJECT = cpu_to_be16(0x2000),
+       MPA_RR_RESERVED = cpu_to_be16(0x1f00),
+       MPA_RR_MASK_REVISION = cpu_to_be16(0x00ff)
 };
 
 /*
diff --git a/drivers/infiniband/hw/erdma/erdma_cmdq.c b/drivers/infiniband/hw/erdma/erdma_cmdq.c
index 6ebfa69..3fd33b8 100644
--- a/drivers/infiniband/hw/erdma/erdma_cmdq.c
+++ b/drivers/infiniband/hw/erdma/erdma_cmdq.c
@@ -283,7 +283,7 @@ static void *get_next_valid_cmdq_cqe(struct erdma_cmdq *cmdq)
        __be32 *cqe = get_queue_entry(cmdq->cq.qbuf, cmdq->cq.ci,
                                      cmdq->cq.depth, CQE_SHIFT);
        u32 owner = FIELD_GET(ERDMA_CQE_HDR_OWNER_MASK,
-                             __be32_to_cpu(READ_ONCE(*cqe)));
+                             be32_to_cpu(READ_ONCE(*cqe)));
 
        return owner ^ !!(cmdq->cq.ci & cmdq->cq.depth) ? cqe : NULL;
 }
@@ -319,7 +319,6 @@ static int erdma_poll_single_cmd_completion(struct erdma_cmdq *cmdq)
        __be32 *cqe;
        u16 ctx_id;
        u64 *sqe;
-       int i;
 
        cqe = get_next_valid_cmdq_cqe(cmdq);
        if (!cqe)
@@ -328,8 +327,8 @@ static int erdma_poll_single_cmd_completion(struct erdma_cmdq *cmdq)
        cmdq->cq.ci++;
 
        dma_rmb();
-       hdr0 = __be32_to_cpu(*cqe);
-       sqe_idx = __be32_to_cpu(*(cqe + 1));
+       hdr0 = be32_to_cpu(*cqe);
+       sqe_idx = be32_to_cpu(*(cqe + 1));
 
        sqe = get_queue_entry(cmdq->sq.qbuf, sqe_idx, cmdq->sq.depth,
                              SQEBB_SHIFT);
@@ -341,9 +340,8 @@ static int erdma_poll_single_cmd_completion(struct erdma_cmdq *cmdq)
        comp_wait->cmd_status = ERDMA_CMD_STATUS_FINISHED;
        comp_wait->comp_status = FIELD_GET(ERDMA_CQE_HDR_SYNDROME_MASK, hdr0);
        cmdq->sq.ci += cmdq->sq.wqebb_cnt;
-
-       for (i = 0; i < 4; i++)
-               comp_wait->comp_data[i] = __be32_to_cpu(*(cqe + 2 + i));
+       /* Copy the 16-byte completion data after the CQE header out to the waiter */
+       be32_to_cpu_array(comp_wait->comp_data, cqe + 2, 4);
 
        if (cmdq->use_event)
                complete(&comp_wait->wait_event);
diff --git a/drivers/infiniband/hw/erdma/erdma_cq.c b/drivers/infiniband/hw/erdma/erdma_cq.c
index cabd867..7e2bfa6 100644
--- a/drivers/infiniband/hw/erdma/erdma_cq.c
+++ b/drivers/infiniband/hw/erdma/erdma_cq.c
@@ -11,7 +11,7 @@ static void *get_next_valid_cqe(struct erdma_cq *cq)
        __be32 *cqe = get_queue_entry(cq->kern_cq.qbuf, cq->kern_cq.ci,
                                      cq->depth, CQE_SHIFT);
        u32 owner = FIELD_GET(ERDMA_CQE_HDR_OWNER_MASK,
-                             __be32_to_cpu(READ_ONCE(*cqe)));
+                             be32_to_cpu(READ_ONCE(*cqe)));
 
        return owner ^ !!(cq->kern_cq.ci & cq->depth) ? cqe : NULL;
 }