Merge branch 'ct-offload' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed...
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 065c74a..57b2494 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -1195,6 +1195,7 @@ void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct mlx5e_rep_priv *rpriv  = priv->ppriv;
        struct mlx5_eswitch_rep *rep = rpriv->rep;
+       struct mlx5e_tc_update_priv tc_priv = {};
        struct mlx5_wq_cyc *wq = &rq->wqe.wq;
        struct mlx5e_wqe_frag_info *wi;
        struct sk_buff *skb;
@@ -1227,13 +1228,78 @@ void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
        if (rep->vlan && skb_vlan_tag_present(skb))
                skb_vlan_pop(skb);
 
+       if (!mlx5e_tc_rep_update_skb(cqe, skb, &tc_priv))
+               goto free_wqe;
+
        napi_gro_receive(rq->cq.napi, skb);
 
+       mlx5_tc_rep_post_napi_receive(&tc_priv);
+
 free_wqe:
        mlx5e_free_rx_wqe(rq, wi, true);
 wq_cyc_pop:
        mlx5_wq_cyc_pop(wq);
 }
+
+void mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq *rq,
+                                  struct mlx5_cqe64 *cqe)
+{
+       u16 cstrides       = mpwrq_get_cqe_consumed_strides(cqe);
+       u16 wqe_id         = be16_to_cpu(cqe->wqe_id);
+       struct mlx5e_mpw_info *wi = &rq->mpwqe.info[wqe_id];
+       u16 stride_ix      = mpwrq_get_cqe_stride_index(cqe);
+       u32 wqe_offset     = stride_ix << rq->mpwqe.log_stride_sz;
+       u32 head_offset    = wqe_offset & (PAGE_SIZE - 1);
+       u32 page_idx       = wqe_offset >> PAGE_SHIFT;
+       struct mlx5e_tc_update_priv tc_priv = {};
+       struct mlx5e_rx_wqe_ll *wqe;
+       struct mlx5_wq_ll *wq;
+       struct sk_buff *skb;
+       u16 cqe_bcnt;
+
+       wi->consumed_strides += cstrides;
+
+       if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
+               trigger_report(rq, cqe);
+               rq->stats->wqe_err++;
+               goto mpwrq_cqe_out;
+       }
+
+       if (unlikely(mpwrq_is_filler_cqe(cqe))) {
+               struct mlx5e_rq_stats *stats = rq->stats;
+
+               stats->mpwqe_filler_cqes++;
+               stats->mpwqe_filler_strides += cstrides;
+               goto mpwrq_cqe_out;
+       }
+
+       cqe_bcnt = mpwrq_get_cqe_byte_cnt(cqe);
+
+       skb = INDIRECT_CALL_2(rq->mpwqe.skb_from_cqe_mpwrq,
+                             mlx5e_skb_from_cqe_mpwrq_linear,
+                             mlx5e_skb_from_cqe_mpwrq_nonlinear,
+                             rq, wi, cqe_bcnt, head_offset, page_idx);
+       if (!skb)
+               goto mpwrq_cqe_out;
+
+       mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
+
+       if (!mlx5e_tc_rep_update_skb(cqe, skb, &tc_priv))
+               goto mpwrq_cqe_out;
+
+       napi_gro_receive(rq->cq.napi, skb);
+
+       mlx5_tc_rep_post_napi_receive(&tc_priv);
+
+mpwrq_cqe_out:
+       if (likely(wi->consumed_strides < rq->mpwqe.num_strides))
+               return;
+
+       wq  = &rq->mpwqe.wq;
+       wqe = mlx5_wq_ll_get_wqe(wq, wqe_id);
+       mlx5e_free_rx_mpwqe(rq, wi, true);
+       mlx5_wq_ll_pop(wq, cqe->wqe_id, &wqe->next.next_wqe_index);
+}
 #endif
 
 struct sk_buff *