IB/hfi1: Ensure read of producer s_head is correct
author	Mike Marciniszyn <mike.marciniszyn@intel.com>
	Wed, 8 Feb 2017 13:26:08 +0000 (05:26 -0800)
committer	Doug Ledford <dledford@redhat.com>
	Sun, 19 Feb 2017 14:18:33 +0000 (09:18 -0500)
The reads of s_head in hfi1_make_rc_req() and
qib_make_rc_req() lack the necessary barrier instructions.

Correct other ACCESS_ONCE() warnings in the same files by
converting them to READ_ONCE().
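
For context, s_head is the producer index of the send work queue:
post_one_send() fills in a WQE and only then advances s_head, while
the send engine consumes entries from s_tail. A plain C load of
s_head may be refetched or torn by the compiler, which is what
READ_ONCE() forbids. The following is a minimal sketch of the
intended pairing, not driver code; the sketch_* names and the struct
are hypothetical stand-ins for rvt_qp and post_one_send():

#include <linux/types.h>
#include <linux/compiler.h>	/* READ_ONCE() */
#include <asm/barrier.h>	/* smp_wmb(), smp_read_barrier_depends() */

struct sketch_qp {
	u32 s_head;	/* producer index, advanced by post_one_send() */
	u32 s_tail;	/* consumer index, owned by the send engine */
};

/* Producer: publish the WQE contents before publishing the new index. */
static void sketch_post_one_send(struct sketch_qp *qp, u32 next)
{
	/* ... WQE fields at the old s_head written here ... */
	smp_wmb();	/* order the WQE stores before the index store */
	qp->s_head = next;
}

/* Consumer: read s_head exactly once, mirroring the checks in the diff. */
static bool sketch_queue_empty(struct sketch_qp *qp)
{
	smp_read_barrier_depends();	/* see sketch_post_one_send() */
	return qp->s_tail == READ_ONCE(qp->s_head);
}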

Reviewed-by: Ashutosh Dixit <ashutosh.dixit@intel.com>
Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
drivers/infiniband/hw/hfi1/rc.c
drivers/infiniband/hw/qib/qib_rc.c

index 1dd999e..6446179 100644
--- a/drivers/infiniband/hw/hfi1/rc.c
+++ b/drivers/infiniband/hw/hfi1/rc.c
@@ -414,7 +414,7 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
                        goto bail;
                /* We are in the error state, flush the work request. */
                smp_read_barrier_depends(); /* see post_one_send() */
-               if (qp->s_last == ACCESS_ONCE(qp->s_head))
+               if (qp->s_last == READ_ONCE(qp->s_head))
                        goto bail;
                /* If DMAs are in progress, we can't flush immediately. */
                if (iowait_sdma_pending(&priv->s_iowait)) {
@@ -457,7 +457,8 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
                newreq = 0;
                if (qp->s_cur == qp->s_tail) {
                        /* Check if send work queue is empty. */
-                       if (qp->s_tail == qp->s_head) {
+                       smp_read_barrier_depends(); /* see post_one_send() */
+                       if (qp->s_tail == READ_ONCE(qp->s_head)) {
                                clear_ahg(qp);
                                goto bail;
                        }
@@ -1590,7 +1591,7 @@ static void rc_rcv_resp(struct hfi1_ibport *ibp,
 
        /* Ignore invalid responses. */
        smp_read_barrier_depends(); /* see post_one_send */
-       if (cmp_psn(psn, ACCESS_ONCE(qp->s_next_psn)) >= 0)
+       if (cmp_psn(psn, READ_ONCE(qp->s_next_psn)) >= 0)
                goto ack_done;
 
        /* Ignore duplicate responses. */
index 031433c..696bcd0 100644
--- a/drivers/infiniband/hw/qib/qib_rc.c
+++ b/drivers/infiniband/hw/qib/qib_rc.c
@@ -257,7 +257,7 @@ int qib_make_rc_req(struct rvt_qp *qp, unsigned long *flags)
                        goto bail;
                /* We are in the error state, flush the work request. */
                smp_read_barrier_depends(); /* see post_one_send() */
-               if (qp->s_last == ACCESS_ONCE(qp->s_head))
+               if (qp->s_last == READ_ONCE(qp->s_head))
                        goto bail;
                /* If DMAs are in progress, we can't flush immediately. */
                if (atomic_read(&priv->s_dma_busy)) {
@@ -303,7 +303,8 @@ int qib_make_rc_req(struct rvt_qp *qp, unsigned long *flags)
                newreq = 0;
                if (qp->s_cur == qp->s_tail) {
                        /* Check if send work queue is empty. */
-                       if (qp->s_tail == qp->s_head)
+                       smp_read_barrier_depends(); /* see post_one_send() */
+                       if (qp->s_tail == READ_ONCE(qp->s_head))
                                goto bail;
                        /*
                         * If a fence is requested, wait for previous
@@ -1390,7 +1391,7 @@ static void qib_rc_rcv_resp(struct qib_ibport *ibp,
 
        /* Ignore invalid responses. */
        smp_read_barrier_depends(); /* see post_one_send */
-       if (qib_cmp24(psn, ACCESS_ONCE(qp->s_next_psn)) >= 0)
+       if (qib_cmp24(psn, READ_ONCE(qp->s_next_psn)) >= 0)
                goto ack_done;
 
        /* Ignore duplicate responses. */