Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 2c3c594..b3f02aa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -246,12 +246,17 @@ static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq,
 
 static int mlx5e_create_umr_mkey(struct mlx5_core_dev *mdev,
                                 u64 npages, u8 page_shift,
-                                struct mlx5_core_mkey *umr_mkey)
+                                struct mlx5_core_mkey *umr_mkey,
+                                dma_addr_t filler_addr)
 {
-       int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
+       struct mlx5_mtt *mtt;
+       int inlen;
        void *mkc;
        u32 *in;
        int err;
+       int i;
+
+       inlen = MLX5_ST_SZ_BYTES(create_mkey_in) + sizeof(*mtt) * npages;
 
        in = kvzalloc(inlen, GFP_KERNEL);
        if (!in)
@@ -271,6 +276,18 @@ static int mlx5e_create_umr_mkey(struct mlx5_core_dev *mdev,
        MLX5_SET(mkc, mkc, translations_octword_size,
                 MLX5_MTT_OCTW(npages));
        MLX5_SET(mkc, mkc, log_page_size, page_shift);
+       MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
+                MLX5_MTT_OCTW(npages));
+
+       /* Initialize the mkey with all MTTs pointing to a default
+        * page (filler_addr). When the channels are activated, UMR
+        * WQEs will redirect the RX WQEs to the actual memory from
+        * the RQ's pool, while the gaps (wqe_overflow) remain mapped
+        * to the default page.
+        */
+       mtt = MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
+       for (i = 0; i < npages; i++)
+               mtt[i].ptag = cpu_to_be64(filler_addr);
 
        err = mlx5_core_create_mkey(mdev, umr_mkey, in, inlen);
 
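The mkey input is now variable-length: the create_mkey_in header is followed inline by one MTT entry per page, so inlen grows with npages and every entry is pre-seeded with the filler page's DMA address. A minimal sketch of that layout pattern, with hypothetical struct names rather than the real mlx5 definitions:

/* Sketch: pre-seeding a variable-length translation table with a
 * filler DMA address, as the hunk above does for the mkey's MTTs.
 * struct mtt_entry and struct mkey_cmd are illustrative stand-ins,
 * not the mlx5 firmware command layout.
 */
#include <linux/mm.h>
#include <linux/types.h>
#include <asm/byteorder.h>

struct mtt_entry {
	__be64 ptag;			/* DMA address of one page */
};

struct mkey_cmd {
	u8 hdr[256];			/* fixed-size command header */
	struct mtt_entry mtt[];		/* npages translation entries */
};

static struct mkey_cmd *build_mkey_cmd(dma_addr_t filler_addr, u64 npages)
{
	int inlen = sizeof(struct mkey_cmd) + sizeof(struct mtt_entry) * npages;
	struct mkey_cmd *cmd;
	u64 i;

	cmd = kvzalloc(inlen, GFP_KERNEL);
	if (!cmd)
		return NULL;

	/* Every entry starts out pointing at the filler page; later
	 * UMR WQEs overwrite the entries that back real RX buffers.
	 */
	for (i = 0; i < npages; i++)
		cmd->mtt[i].ptag = cpu_to_be64(filler_addr);

	return cmd;
}

In the real command, MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt) locates the same trailing array inside the firmware command buffer.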
@@ -282,7 +299,8 @@ static int mlx5e_create_rq_umr_mkey(struct mlx5_core_dev *mdev, struct mlx5e_rq
 {
        u64 num_mtts = MLX5E_REQUIRED_MTTS(mlx5_wq_ll_get_size(&rq->mpwqe.wq));
 
-       return mlx5e_create_umr_mkey(mdev, num_mtts, PAGE_SHIFT, &rq->umr_mkey);
+       return mlx5e_create_umr_mkey(mdev, num_mtts, PAGE_SHIFT, &rq->umr_mkey,
+                                    rq->wqe_overflow.addr);
 }
 
 static inline u64 mlx5e_get_mpwqe_offset(struct mlx5e_rq *rq, u16 wqe_ix)
@@ -350,6 +368,28 @@ static void mlx5e_rq_err_cqe_work(struct work_struct *recover_work)
        mlx5e_reporter_rq_cqe_err(rq);
 }
 
+static int mlx5e_alloc_mpwqe_rq_drop_page(struct mlx5e_rq *rq)
+{
+       rq->wqe_overflow.page = alloc_page(GFP_KERNEL);
+       if (!rq->wqe_overflow.page)
+               return -ENOMEM;
+
+       rq->wqe_overflow.addr = dma_map_page(rq->pdev, rq->wqe_overflow.page, 0,
+                                            PAGE_SIZE, rq->buff.map_dir);
+       if (dma_mapping_error(rq->pdev, rq->wqe_overflow.addr)) {
+               __free_page(rq->wqe_overflow.page);
+               return -ENOMEM;
+       }
+       return 0;
+}
+
+static void mlx5e_free_mpwqe_rq_drop_page(struct mlx5e_rq *rq)
+{
+       dma_unmap_page(rq->pdev, rq->wqe_overflow.addr, PAGE_SIZE,
+                      rq->buff.map_dir);
+       __free_page(rq->wqe_overflow.page);
+}
+
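The new helper pair follows the usual DMA-mapping discipline: a mapping returned by dma_map_page() must be validated with dma_mapping_error() before use, and teardown mirrors setup in reverse order. A standalone sketch of the same pattern, with the device and direction passed explicitly instead of read from the RQ:

/* Hypothetical standalone version of the alloc/free pair above. */
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/gfp.h>

static int drop_page_alloc(struct device *dev, enum dma_data_direction dir,
			   struct page **page, dma_addr_t *addr)
{
	*page = alloc_page(GFP_KERNEL);
	if (!*page)
		return -ENOMEM;

	*addr = dma_map_page(dev, *page, 0, PAGE_SIZE, dir);
	if (dma_mapping_error(dev, *addr)) {
		__free_page(*page);	/* unwind: mapping failed */
		return -ENOMEM;
	}
	return 0;
}

static void drop_page_free(struct device *dev, enum dma_data_direction dir,
			   struct page *page, dma_addr_t addr)
{
	dma_unmap_page(dev, addr, PAGE_SIZE, dir);	/* reverse of setup */
	__free_page(page);
}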
 static int mlx5e_alloc_rq(struct mlx5e_channel *c,
                          struct mlx5e_params *params,
                          struct mlx5e_xsk_param *xsk,
@@ -396,7 +436,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
                rq_xdp_ix += params->num_channels * MLX5E_RQ_GROUP_XSK;
        err = xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq_xdp_ix);
        if (err < 0)
-               goto err_rq_wq_destroy;
+               goto err_rq_xdp_prog;
 
        rq->buff.map_dir = params->xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
        rq->buff.headroom = mlx5e_get_rq_headroom(mdev, params, xsk);
@@ -406,6 +446,10 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
        case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
                err = mlx5_wq_ll_create(mdev, &rqp->wq, rqc_wq, &rq->mpwqe.wq,
                                        &rq->wq_ctrl);
+               if (err)
+                       goto err_rq_xdp;
+
+               err = mlx5e_alloc_mpwqe_rq_drop_page(rq);
                if (err)
                        goto err_rq_wq_destroy;
 
@@ -424,18 +468,18 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
 
                err = mlx5e_create_rq_umr_mkey(mdev, rq);
                if (err)
-                       goto err_rq_wq_destroy;
+                       goto err_rq_drop_page;
                rq->mkey_be = cpu_to_be32(rq->umr_mkey.key);
 
                err = mlx5e_rq_alloc_mpwqe_info(rq, c);
                if (err)
-                       goto err_free;
+                       goto err_rq_mkey;
                break;
        default: /* MLX5_WQ_TYPE_CYCLIC */
                err = mlx5_wq_cyc_create(mdev, &rqp->wq, rqc_wq, &rq->wqe.wq,
                                         &rq->wq_ctrl);
                if (err)
-                       goto err_rq_wq_destroy;
+                       goto err_rq_xdp;
 
                rq->wqe.wq.db = &rq->wqe.wq.db[MLX5_RCV_DBR];
 
@@ -450,19 +494,19 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
                                      GFP_KERNEL, cpu_to_node(c->cpu));
                if (!rq->wqe.frags) {
                        err = -ENOMEM;
-                       goto err_free;
+                       goto err_rq_wq_destroy;
                }
 
                err = mlx5e_init_di_list(rq, wq_sz, c->cpu);
                if (err)
-                       goto err_free;
+                       goto err_rq_frags;
 
                rq->mkey_be = c->mkey_be;
        }
 
        err = mlx5e_rq_set_handlers(rq, params, xsk);
        if (err)
-               goto err_free;
+               goto err_free_by_rq_type;
 
        if (xsk) {
                err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
@@ -486,13 +530,13 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
                if (IS_ERR(rq->page_pool)) {
                        err = PTR_ERR(rq->page_pool);
                        rq->page_pool = NULL;
-                       goto err_free;
+                       goto err_free_by_rq_type;
                }
                err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
                                                 MEM_TYPE_PAGE_POOL, rq->page_pool);
        }
        if (err)
-               goto err_free;
+               goto err_free_by_rq_type;
 
        for (i = 0; i < wq_sz; i++) {
                if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
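For context on the page_pool branch above: the pool is created first and then registered as the rxq's memory model, so XDP frame returns recycle pages into the pool. A rough sketch of that flow under assumed parameters (the pp_params values are placeholders, not what mlx5e programs):

/* Hypothetical setup mirroring the flow above. */
#include <linux/err.h>
#include <linux/numa.h>
#include <net/page_pool.h>
#include <net/xdp.h>

static int rxq_page_pool_setup(struct xdp_rxq_info *xdp_rxq,
			       struct device *dev, u32 pool_size,
			       struct page_pool **pp_out)
{
	struct page_pool_params pp_params = {
		.order		= 0,
		.pool_size	= pool_size,
		.nid		= NUMA_NO_NODE,
		.dev		= dev,
		.dma_dir	= DMA_FROM_DEVICE,
	};
	struct page_pool *pp;
	int err;

	pp = page_pool_create(&pp_params);
	if (IS_ERR(pp))
		return PTR_ERR(pp);

	/* Tie the pool to the rxq so XDP returns recycle into it. */
	err = xdp_rxq_info_reg_mem_model(xdp_rxq, MEM_TYPE_PAGE_POOL, pp);
	if (err) {
		page_pool_destroy(pp);
		return err;
	}
	*pp_out = pp;
	return 0;
}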
@@ -542,23 +586,27 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
 
        return 0;
 
-err_free:
+err_free_by_rq_type:
        switch (rq->wq_type) {
        case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
                kvfree(rq->mpwqe.info);
+err_rq_mkey:
                mlx5_core_destroy_mkey(mdev, &rq->umr_mkey);
+err_rq_drop_page:
+               mlx5e_free_mpwqe_rq_drop_page(rq);
                break;
        default: /* MLX5_WQ_TYPE_CYCLIC */
-               kvfree(rq->wqe.frags);
                mlx5e_free_di_list(rq);
+err_rq_frags:
+               kvfree(rq->wqe.frags);
        }
-
 err_rq_wq_destroy:
+       mlx5_wq_destroy(&rq->wq_ctrl);
+err_rq_xdp:
+       xdp_rxq_info_unreg(&rq->xdp_rxq);
+err_rq_xdp_prog:
        if (params->xdp_prog)
                bpf_prog_put(params->xdp_prog);
-       xdp_rxq_info_unreg(&rq->xdp_rxq);
-       page_pool_destroy(rq->page_pool);
-       mlx5_wq_destroy(&rq->wq_ctrl);
 
        return err;
 }
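The restructured error path is the canonical kernel goto ladder: one label per acquired resource, declared in reverse order of acquisition, with the WQ-type-specific labels placed inside the switch so each type unwinds only what it allocated. A generic sketch of the idiom, with illustrative names only:

/* Generic shape of the unwind ladder adopted above: a failure at
 * step N jumps to the label that releases steps N-1 down to 1.
 */
struct ctx;

int  setup_a(struct ctx *c);
int  setup_b(struct ctx *c);
int  setup_c(struct ctx *c);
void teardown_a(struct ctx *c);
void teardown_b(struct ctx *c);

int setup_all(struct ctx *c)
{
	int err;

	err = setup_a(c);
	if (err)
		return err;		/* nothing to unwind yet */

	err = setup_b(c);
	if (err)
		goto err_teardown_a;

	err = setup_c(c);
	if (err)
		goto err_teardown_b;

	return 0;

err_teardown_b:
	teardown_b(c);			/* reverse order of setup */
err_teardown_a:
	teardown_a(c);
	return err;
}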
@@ -580,6 +628,7 @@ static void mlx5e_free_rq(struct mlx5e_rq *rq)
        case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
                kvfree(rq->mpwqe.info);
                mlx5_core_destroy_mkey(rq->mdev, &rq->umr_mkey);
+               mlx5e_free_mpwqe_rq_drop_page(rq);
                break;
        default: /* MLX5_WQ_TYPE_CYCLIC */
                kvfree(rq->wqe.frags);
@@ -4202,6 +4251,21 @@ int mlx5e_get_vf_stats(struct net_device *dev,
 }
 #endif
 
+static bool mlx5e_gre_tunnel_inner_proto_offload_supported(struct mlx5_core_dev *mdev,
+                                                          struct sk_buff *skb)
+{
+       switch (skb->inner_protocol) {
+       case htons(ETH_P_IP):
+       case htons(ETH_P_IPV6):
+       case htons(ETH_P_TEB):
+               return true;
+       case htons(ETH_P_MPLS_UC):
+       case htons(ETH_P_MPLS_MC):
+               return MLX5_CAP_ETH(mdev, tunnel_stateless_mpls_over_gre);
+       }
+       return false;
+}
+
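A subtlety worth noting in the new helper: skb->inner_protocol is a big-endian __be16, so the case labels compare against htons() constants, which the compiler folds at build time, and the MPLS ethertypes are additionally gated on the device's tunnel_stateless_mpls_over_gre capability. A reduced sketch of the same shape, with a hypothetical standalone helper and the capability passed as a plain bool:

/* Hypothetical reduced version: true if the inner ethertype is one
 * that can be offloaded, with MPLS gated on a device capability bit.
 */
#include <linux/if_ether.h>
#include <linux/types.h>
#include <asm/byteorder.h>

static bool inner_proto_offloadable(__be16 inner_proto, bool mpls_over_gre_cap)
{
	switch (inner_proto) {
	case htons(ETH_P_IP):		/* stored big-endian in the skb */
	case htons(ETH_P_IPV6):
	case htons(ETH_P_TEB):		/* Ethernet-in-GRE */
		return true;
	case htons(ETH_P_MPLS_UC):
	case htons(ETH_P_MPLS_MC):
		return mpls_over_gre_cap;
	}
	return false;
}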
 static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv,
                                                     struct sk_buff *skb,
                                                     netdev_features_t features)
@@ -4224,7 +4288,9 @@ static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv,
 
        switch (proto) {
        case IPPROTO_GRE:
-               return features;
+               if (mlx5e_gre_tunnel_inner_proto_offload_supported(priv->mdev, skb))
+                       return features;
+               break;
        case IPPROTO_IPIP:
        case IPPROTO_IPV6:
                if (mlx5e_tunnel_proto_supported(priv->mdev, IPPROTO_IPIP))