Merge branch 'ct-offload' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed...
[linux-2.6-microblaze.git] drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 9e99601..57b2494 100644
@@ -158,7 +158,8 @@ static inline u32 mlx5e_decompress_cqes_cont(struct mlx5e_rq *rq,
                        mlx5e_read_mini_arr_slot(wq, cqd, cqcc);
 
                mlx5e_decompress_cqe_no_hash(rq, wq, cqcc);
-               rq->handle_rx_cqe(rq, &cqd->title);
+               INDIRECT_CALL_2(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
+                               mlx5e_handle_rx_cqe, rq, &cqd->title);
        }
        mlx5e_cqes_update_owner(wq, cqcc - wq->cc);
        wq->cc = cqcc;
@@ -178,7 +179,8 @@ static inline u32 mlx5e_decompress_cqes_start(struct mlx5e_rq *rq,
        mlx5e_read_title_slot(rq, wq, cc);
        mlx5e_read_mini_arr_slot(wq, cqd, cc + 1);
        mlx5e_decompress_cqe(rq, wq, cc);
-       rq->handle_rx_cqe(rq, &cqd->title);
+       INDIRECT_CALL_2(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
+                       mlx5e_handle_rx_cqe, rq, &cqd->title);
        cqd->mini_arr_idx++;
 
        return mlx5e_decompress_cqes_cont(rq, wq, 1, budget_rem) - 1;
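
Commentary (not part of the diff): INDIRECT_CALL_2() from include/linux/indirect_call_wrapper.h lets the two hunks above replace the indirect call through rq->handle_rx_cqe with a direct call whenever the pointer matches one of the two named handlers, avoiding retpoline overhead on the RX hot path. A minimal, self-contained userspace sketch of the same idea follows; the macro and handler names are made up for illustration and are not the kernel's:

#include <stdio.h>

static void handle_a(int v) { printf("a: %d\n", v); }
static void handle_b(int v) { printf("b: %d\n", v); }

/* Simplified stand-in for INDIRECT_CALL_2(): try the expected targets as
 * direct calls first, fall back to the plain indirect call otherwise.
 */
#define MY_INDIRECT_CALL_2(f, f2, f1, ...)                         \
        ((f) == (f2) ? (f2)(__VA_ARGS__) :                         \
         (f) == (f1) ? (f1)(__VA_ARGS__) : (f)(__VA_ARGS__))

int main(void)
{
        void (*handler)(int) = handle_a;

        /* The first comparison matches, so the taken branch calls handle_a()
         * directly instead of jumping through the function pointer.
         */
        MY_INDIRECT_CALL_2(handler, handle_a, handle_b, 42);
        return 0;
}
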
@@ -613,13 +615,6 @@ void mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
 
                wqe_counter = be16_to_cpu(cqe->wqe_counter);
 
-               if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) {
-                       netdev_WARN_ONCE(cq->channel->netdev,
-                                        "Bad OP in ICOSQ CQE: 0x%x\n", get_cqe_opcode(cqe));
-                       if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state))
-                               queue_work(cq->channel->priv->wq, &sq->recover_work);
-                       break;
-               }
                do {
                        struct mlx5e_sq_wqe_info *wi;
                        u16 ci;
@@ -629,6 +624,15 @@ void mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
                        ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
                        wi = &sq->db.ico_wqe[ci];
 
+                       if (last_wqe && unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) {
+                               netdev_WARN_ONCE(cq->channel->netdev,
+                                                "Bad OP in ICOSQ CQE: 0x%x\n",
+                                                get_cqe_opcode(cqe));
+                               if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state))
+                                       queue_work(cq->channel->priv->wq, &sq->recover_work);
+                               break;
+                       }
+
                        if (likely(wi->opcode == MLX5_OPCODE_UMR)) {
                                sqcc += MLX5E_UMR_WQEBBS;
                                wi->umr.rq->mpwqe.umr_completed++;
@@ -1191,6 +1195,7 @@ void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct mlx5e_rep_priv *rpriv  = priv->ppriv;
        struct mlx5_eswitch_rep *rep = rpriv->rep;
+       struct mlx5e_tc_update_priv tc_priv = {};
        struct mlx5_wq_cyc *wq = &rq->wqe.wq;
        struct mlx5e_wqe_frag_info *wi;
        struct sk_buff *skb;
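
Commentary (not part of the diff): the mlx5e_tc_update_priv declared in this hunk is filled by mlx5e_tc_rep_update_skb() in the next hunk. Returning false there makes the caller free the WQE without delivering the skb, and mlx5_tc_rep_post_napi_receive() runs only after napi_gro_receive(), which suggests that whatever tc_priv references has to stay valid while the stack processes the packet. A hypothetical, self-contained sketch of that prepare/deliver/release ordering (every name below is invented):

#include <stdbool.h>
#include <stdio.h>

/* Invented per-packet state: filled before delivery, released after. */
struct pkt_update_priv {
        bool holds_ref;
};

static bool pkt_update(struct pkt_update_priv *priv)
{
        priv->holds_ref = true;   /* e.g. take a reference the skb relies on */
        return true;              /* false would mean: drop this packet      */
}

static void pkt_post_receive(struct pkt_update_priv *priv)
{
        if (priv->holds_ref) {
                priv->holds_ref = false;          /* release only after delivery */
                printf("reference released\n");
        }
}

int main(void)
{
        struct pkt_update_priv priv = {};

        if (!pkt_update(&priv))
                return 0;                         /* drop path */
        printf("deliver packet\n");               /* stands in for napi_gro_receive() */
        pkt_post_receive(&priv);
        return 0;
}
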
@@ -1223,13 +1228,78 @@ void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
        if (rep->vlan && skb_vlan_tag_present(skb))
                skb_vlan_pop(skb);
 
+       if (!mlx5e_tc_rep_update_skb(cqe, skb, &tc_priv))
+               goto free_wqe;
+
        napi_gro_receive(rq->cq.napi, skb);
 
+       mlx5_tc_rep_post_napi_receive(&tc_priv);
+
 free_wqe:
        mlx5e_free_rx_wqe(rq, wi, true);
 wq_cyc_pop:
        mlx5_wq_cyc_pop(wq);
 }
+
+void mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq *rq,
+                                  struct mlx5_cqe64 *cqe)
+{
+       u16 cstrides       = mpwrq_get_cqe_consumed_strides(cqe);
+       u16 wqe_id         = be16_to_cpu(cqe->wqe_id);
+       struct mlx5e_mpw_info *wi = &rq->mpwqe.info[wqe_id];
+       u16 stride_ix      = mpwrq_get_cqe_stride_index(cqe);
+       u32 wqe_offset     = stride_ix << rq->mpwqe.log_stride_sz;
+       u32 head_offset    = wqe_offset & (PAGE_SIZE - 1);
+       u32 page_idx       = wqe_offset >> PAGE_SHIFT;
+       struct mlx5e_tc_update_priv tc_priv = {};
+       struct mlx5e_rx_wqe_ll *wqe;
+       struct mlx5_wq_ll *wq;
+       struct sk_buff *skb;
+       u16 cqe_bcnt;
+
+       wi->consumed_strides += cstrides;
+
+       if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
+               trigger_report(rq, cqe);
+               rq->stats->wqe_err++;
+               goto mpwrq_cqe_out;
+       }
+
+       if (unlikely(mpwrq_is_filler_cqe(cqe))) {
+               struct mlx5e_rq_stats *stats = rq->stats;
+
+               stats->mpwqe_filler_cqes++;
+               stats->mpwqe_filler_strides += cstrides;
+               goto mpwrq_cqe_out;
+       }
+
+       cqe_bcnt = mpwrq_get_cqe_byte_cnt(cqe);
+
+       skb = INDIRECT_CALL_2(rq->mpwqe.skb_from_cqe_mpwrq,
+                             mlx5e_skb_from_cqe_mpwrq_linear,
+                             mlx5e_skb_from_cqe_mpwrq_nonlinear,
+                             rq, wi, cqe_bcnt, head_offset, page_idx);
+       if (!skb)
+               goto mpwrq_cqe_out;
+
+       mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
+
+       if (!mlx5e_tc_rep_update_skb(cqe, skb, &tc_priv))
+               goto mpwrq_cqe_out;
+
+       napi_gro_receive(rq->cq.napi, skb);
+
+       mlx5_tc_rep_post_napi_receive(&tc_priv);
+
+mpwrq_cqe_out:
+       if (likely(wi->consumed_strides < rq->mpwqe.num_strides))
+               return;
+
+       wq  = &rq->mpwqe.wq;
+       wqe = mlx5_wq_ll_get_wqe(wq, wqe_id);
+       mlx5e_free_rx_mpwqe(rq, wi, true);
+       mlx5_wq_ll_pop(wq, cqe->wqe_id, &wqe->next.next_wqe_index);
+}
 #endif
 
 struct sk_buff *
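
Commentary on the new mlx5e_handle_rx_cqe_mpwrq_rep() above (not part of the diff): with Striding RQ, several CQEs complete packets that live in the same multi-packet WQE, so the mpwrq_cqe_out path only frees and pops the WQE once the strides consumed across all of its CQEs reach rq->mpwqe.num_strides. A simplified, self-contained sketch of that accounting, with invented names:

#include <stdbool.h>
#include <stdio.h>

/* Invented stand-in for one multi-packet WQE's bookkeeping. */
struct mpw_info {
        unsigned int consumed_strides;
        unsigned int num_strides;
};

/* Account the strides consumed by one CQE and report whether the whole
 * multi-packet WQE can now be released back to the ring.
 */
static bool mpw_complete_cqe(struct mpw_info *wi, unsigned int cstrides)
{
        wi->consumed_strides += cstrides;
        return wi->consumed_strides >= wi->num_strides;
}

int main(void)
{
        struct mpw_info wi = { .consumed_strides = 0, .num_strides = 8 };

        printf("%d\n", mpw_complete_cqe(&wi, 3));  /* 0: WQE still in use     */
        printf("%d\n", mpw_complete_cqe(&wi, 5));  /* 1: all strides consumed */
        return 0;
}
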