/*
 * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <net/tc_act/tc_gact.h>
#include <net/pkt_cls.h>
#include <linux/mlx5/fs.h>
#include <net/vxlan.h>
#include <linux/bpf.h>
#include "eswitch.h"
#include "en.h"
#include "en_tc.h"
#include "en_rep.h"
#include "en_accel/ipsec.h"
#include "en_accel/ipsec_rxtx.h"
#include "accel/ipsec.h"
#include "vxlan.h"

struct mlx5e_rq_param {
        u32                     rqc[MLX5_ST_SZ_DW(rqc)];
        struct mlx5_wq_param    wq;
};

struct mlx5e_sq_param {
        u32                        sqc[MLX5_ST_SZ_DW(sqc)];
        struct mlx5_wq_param       wq;
};

struct mlx5e_cq_param {
        u32                        cqc[MLX5_ST_SZ_DW(cqc)];
        struct mlx5_wq_param       wq;
        u16                        eq_ix;
        u8                         cq_period_mode;
};

struct mlx5e_channel_param {
        struct mlx5e_rq_param      rq;
        struct mlx5e_sq_param      sq;
        struct mlx5e_sq_param      xdp_sq;
        struct mlx5e_sq_param      icosq;
        struct mlx5e_cq_param      rx_cq;
        struct mlx5e_cq_param      tx_cq;
        struct mlx5e_cq_param      icosq_cq;
};

static int mlx5e_get_node(struct mlx5e_priv *priv, int ix)
{
        return pci_irq_get_node(priv->mdev->pdev, MLX5_EQ_VEC_COMP_BASE + ix);
}

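/* Striding RQ (MPWQE): one multi-packet receive WQE is filled by the
 * device in fixed-size strides, amortizing descriptor overhead across
 * packets.  It relies on UMR, since the driver (re)registers each WQE's
 * pages via UMR WQEs posted on the ICOSQ; hence the umr_ptr_rlky and
 * reg_umr_sq capability checks alongside striding_rq itself.
 */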
static bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
{
        return MLX5_CAP_GEN(mdev, striding_rq) &&
                MLX5_CAP_GEN(mdev, umr_ptr_rlky) &&
                MLX5_CAP_ETH(mdev, reg_umr_sq);
}

void mlx5e_set_rq_type_params(struct mlx5_core_dev *mdev,
                              struct mlx5e_params *params, u8 rq_type)
{
        params->rq_wq_type = rq_type;
        params->lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
        switch (params->rq_wq_type) {
        case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
                params->log_rq_size = is_kdump_kernel() ?
                        MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW :
                        MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW;
                params->mpwqe_log_stride_sz =
                        MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS) ?
                        MLX5_MPWRQ_CQE_CMPRS_LOG_STRIDE_SZ(mdev) :
                        MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev);
                params->mpwqe_log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ -
                        params->mpwqe_log_stride_sz;
                break;
        default: /* MLX5_WQ_TYPE_LINKED_LIST */
                params->log_rq_size = is_kdump_kernel() ?
                        MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE :
                        MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
                params->rq_headroom = params->xdp_prog ?
                        XDP_PACKET_HEADROOM : MLX5_RX_HEADROOM;
                params->rq_headroom += NET_IP_ALIGN;

                /* Extra room needed for build_skb */
                params->lro_wqe_sz -= params->rq_headroom +
                        SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
        }

        mlx5_core_info(mdev, "MLX5E: StrdRq(%d) RqSz(%ld) StrdSz(%ld) RxCqeCmprss(%d)\n",
                       params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ,
                       BIT(params->log_rq_size),
                       BIT(params->mpwqe_log_stride_sz),
                       MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS));
}

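/* Choose the RQ type from device caps and the current configuration:
 * striding RQ is used only when the device supports it and neither XDP
 * nor IPSec offload is active; both of those paths require the
 * linked-list RQ (see the MPWQE/IPSec check in mlx5e_alloc_rq()).
 */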
static void mlx5e_set_rq_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
        u8 rq_type = mlx5e_check_fragmented_striding_rq_cap(mdev) &&
                    !params->xdp_prog && !MLX5_IPSEC_DEV(mdev) ?
                    MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ :
                    MLX5_WQ_TYPE_LINKED_LIST;
        mlx5e_set_rq_type_params(mdev, params, rq_type);
}

static void mlx5e_update_carrier(struct mlx5e_priv *priv)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        u8 port_state;

        port_state = mlx5_query_vport_state(mdev,
                                            MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT,
                                            0);

        if (port_state == VPORT_STATE_UP) {
                netdev_info(priv->netdev, "Link up\n");
                netif_carrier_on(priv->netdev);
        } else {
                netdev_info(priv->netdev, "Link down\n");
                netif_carrier_off(priv->netdev);
        }
}

static void mlx5e_update_carrier_work(struct work_struct *work)
{
        struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
                                               update_carrier_work);

        mutex_lock(&priv->state_lock);
        if (test_bit(MLX5E_STATE_OPENED, &priv->state))
                if (priv->profile->update_carrier)
                        priv->profile->update_carrier(priv);
        mutex_unlock(&priv->state_lock);
}

static void mlx5e_tx_timeout_work(struct work_struct *work)
{
        struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
                                               tx_timeout_work);
        int err;

        rtnl_lock();
        mutex_lock(&priv->state_lock);
        if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
                goto unlock;
        mlx5e_close_locked(priv->netdev);
        err = mlx5e_open_locked(priv->netdev);
        if (err)
                netdev_err(priv->netdev, "mlx5e_open_locked failed recovering from a tx_timeout, err(%d).\n",
                           err);
unlock:
        mutex_unlock(&priv->state_lock);
        rtnl_unlock();
}

static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
{
        struct mlx5e_sw_stats temp, *s = &temp;
        struct mlx5e_rq_stats *rq_stats;
        struct mlx5e_sq_stats *sq_stats;
        int i, j;

        memset(s, 0, sizeof(*s));
        for (i = 0; i < priv->channels.num; i++) {
                struct mlx5e_channel *c = priv->channels.c[i];

                rq_stats = &c->rq.stats;

                s->rx_packets   += rq_stats->packets;
                s->rx_bytes     += rq_stats->bytes;
                s->rx_lro_packets += rq_stats->lro_packets;
                s->rx_lro_bytes += rq_stats->lro_bytes;
                s->rx_csum_none += rq_stats->csum_none;
                s->rx_csum_complete += rq_stats->csum_complete;
                s->rx_csum_unnecessary += rq_stats->csum_unnecessary;
                s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner;
                s->rx_xdp_drop += rq_stats->xdp_drop;
                s->rx_xdp_tx += rq_stats->xdp_tx;
                s->rx_xdp_tx_full += rq_stats->xdp_tx_full;
                s->rx_wqe_err   += rq_stats->wqe_err;
                s->rx_mpwqe_filler += rq_stats->mpwqe_filler;
                s->rx_buff_alloc_err += rq_stats->buff_alloc_err;
                s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks;
                s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts;
                s->rx_page_reuse  += rq_stats->page_reuse;
                s->rx_cache_reuse += rq_stats->cache_reuse;
                s->rx_cache_full  += rq_stats->cache_full;
                s->rx_cache_empty += rq_stats->cache_empty;
                s->rx_cache_busy  += rq_stats->cache_busy;
                s->rx_cache_waive += rq_stats->cache_waive;

                for (j = 0; j < priv->channels.params.num_tc; j++) {
                        sq_stats = &c->sq[j].stats;

                        s->tx_packets           += sq_stats->packets;
                        s->tx_bytes             += sq_stats->bytes;
                        s->tx_tso_packets       += sq_stats->tso_packets;
                        s->tx_tso_bytes         += sq_stats->tso_bytes;
                        s->tx_tso_inner_packets += sq_stats->tso_inner_packets;
                        s->tx_tso_inner_bytes   += sq_stats->tso_inner_bytes;
                        s->tx_queue_stopped     += sq_stats->stopped;
                        s->tx_queue_wake        += sq_stats->wake;
                        s->tx_queue_dropped     += sq_stats->dropped;
                        s->tx_xmit_more         += sq_stats->xmit_more;
                        s->tx_csum_partial_inner += sq_stats->csum_partial_inner;
                        s->tx_csum_none         += sq_stats->csum_none;
                        s->tx_csum_partial      += sq_stats->csum_partial;
                }
        }

        s->link_down_events_phy = MLX5_GET(ppcnt_reg,
                                priv->stats.pport.phy_counters,
                                counter_set.phys_layer_cntrs.link_down_events);
        memcpy(&priv->stats.sw, s, sizeof(*s));
}

static void mlx5e_update_vport_counters(struct mlx5e_priv *priv)
{
        int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
        u32 *out = (u32 *)priv->stats.vport.query_vport_out;
        u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {0};
        struct mlx5_core_dev *mdev = priv->mdev;

        MLX5_SET(query_vport_counter_in, in, opcode,
                 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
        MLX5_SET(query_vport_counter_in, in, op_mod, 0);
        MLX5_SET(query_vport_counter_in, in, other_vport, 0);

        mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
}

static void mlx5e_update_pport_counters(struct mlx5e_priv *priv, bool full)
{
        struct mlx5e_pport_stats *pstats = &priv->stats.pport;
        struct mlx5_core_dev *mdev = priv->mdev;
        u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
        int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
        int prio;
        void *out;

        MLX5_SET(ppcnt_reg, in, local_port, 1);

        out = pstats->IEEE_802_3_counters;
        MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
        mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);

        if (!full)
                return;

        out = pstats->RFC_2863_counters;
        MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
        mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);

        out = pstats->RFC_2819_counters;
        MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
        mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);

        out = pstats->phy_counters;
        MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
        mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);

        if (MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group)) {
                out = pstats->phy_statistical_counters;
                MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP);
                mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
        }

        if (MLX5_CAP_PCAM_FEATURE(mdev, rx_buffer_fullness_counters)) {
                out = pstats->eth_ext_counters;
                MLX5_SET(ppcnt_reg, in, grp, MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP);
                mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
        }

        MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP);
        for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
                out = pstats->per_prio_counters[prio];
                MLX5_SET(ppcnt_reg, in, prio_tc, prio);
                mlx5_core_access_reg(mdev, in, sz, out, sz,
                                     MLX5_REG_PPCNT, 0, 0);
        }
}

static void mlx5e_update_q_counter(struct mlx5e_priv *priv)
{
        struct mlx5e_qcounter_stats *qcnt = &priv->stats.qcnt;
        u32 out[MLX5_ST_SZ_DW(query_q_counter_out)];
        int err;

        if (!priv->q_counter)
                return;

        err = mlx5_core_query_q_counter(priv->mdev, priv->q_counter, 0, out, sizeof(out));
        if (err)
                return;

        qcnt->rx_out_of_buffer = MLX5_GET(query_q_counter_out, out, out_of_buffer);
}

static void mlx5e_update_pcie_counters(struct mlx5e_priv *priv)
{
        struct mlx5e_pcie_stats *pcie_stats = &priv->stats.pcie;
        struct mlx5_core_dev *mdev = priv->mdev;
        u32 in[MLX5_ST_SZ_DW(mpcnt_reg)] = {0};
        int sz = MLX5_ST_SZ_BYTES(mpcnt_reg);
        void *out;

        if (!MLX5_CAP_MCAM_FEATURE(mdev, pcie_performance_group))
                return;

        out = pcie_stats->pcie_perf_counters;
        MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP);
        mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
}

void mlx5e_update_stats(struct mlx5e_priv *priv, bool full)
{
        if (full) {
                mlx5e_update_pcie_counters(priv);
                mlx5e_ipsec_update_stats(priv);
        }
        mlx5e_update_pport_counters(priv, full);
        mlx5e_update_vport_counters(priv);
        mlx5e_update_q_counter(priv);
        mlx5e_update_sw_counters(priv);
}

static void mlx5e_update_ndo_stats(struct mlx5e_priv *priv)
{
        mlx5e_update_stats(priv, false);
}

void mlx5e_update_stats_work(struct work_struct *work)
{
        struct delayed_work *dwork = to_delayed_work(work);
        struct mlx5e_priv *priv = container_of(dwork, struct mlx5e_priv,
                                               update_stats_work);
        mutex_lock(&priv->state_lock);
        if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
                priv->profile->update_stats(priv);
                queue_delayed_work(priv->wq, dwork,
                                   msecs_to_jiffies(MLX5E_UPDATE_STATS_INTERVAL));
        }
        mutex_unlock(&priv->state_lock);
}

static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
                              enum mlx5_dev_event event, unsigned long param)
{
        struct mlx5e_priv *priv = vpriv;

        if (!test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state))
                return;

        switch (event) {
        case MLX5_DEV_EVENT_PORT_UP:
        case MLX5_DEV_EVENT_PORT_DOWN:
                queue_work(priv->wq, &priv->update_carrier_work);
                break;
        default:
                break;
        }
}

static void mlx5e_enable_async_events(struct mlx5e_priv *priv)
{
        set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state);
}

static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
{
        clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state);
        synchronize_irq(pci_irq_vector(priv->mdev->pdev, MLX5_EQ_VEC_ASYNC));
}

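/* Illustrative arithmetic (the numbers depend on PAGE_SIZE and the
 * MPWRQ geometry, so treat them as an example only): with 16 pages per
 * WQE and 8-byte MTT entries the raw array is 128 bytes, which ALIGN()
 * below rounds up to a multiple of MLX5_UMR_MTT_ALIGNMENT so that the
 * UMR copy, done in units of that alignment, stays inside our buffer.
 */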
static inline int mlx5e_get_wqe_mtt_sz(void)
{
        /* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes.
         * To avoid copying garbage after the mtt array, we allocate
         * a little more.
         */
        return ALIGN(MLX5_MPWRQ_PAGES_PER_WQE * sizeof(__be64),
                     MLX5_UMR_MTT_ALIGNMENT);
}

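/* Pre-build the UMR WQE for one MPWQE slot: the control segment targets
 * the ICOSQ and carries the RQ mkey as the WQE immediate, the UMR
 * control segment describes how many MTT octowords to update and at
 * which translation offset, and the data segment points at the
 * DMA-mapped MTT array built in mlx5e_rq_alloc_mpwqe_info().
 */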
static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
                                       struct mlx5e_icosq *sq,
                                       struct mlx5e_umr_wqe *wqe,
                                       u16 ix)
{
        struct mlx5_wqe_ctrl_seg      *cseg = &wqe->ctrl;
        struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl;
        struct mlx5_wqe_data_seg      *dseg = &wqe->data;
        struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
        u8 ds_cnt = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS);
        u32 umr_wqe_mtt_offset = mlx5e_get_wqe_mtt_offset(rq, ix);

        cseg->qpn_ds    = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
                                      ds_cnt);
        cseg->fm_ce_se  = MLX5_WQE_CTRL_CQ_UPDATE;
        cseg->imm       = rq->mkey_be;

        ucseg->flags = MLX5_UMR_TRANSLATION_OFFSET_EN;
        ucseg->xlt_octowords =
                cpu_to_be16(MLX5_MTT_OCTW(MLX5_MPWRQ_PAGES_PER_WQE));
        ucseg->bsf_octowords =
                cpu_to_be16(MLX5_MTT_OCTW(umr_wqe_mtt_offset));
        ucseg->mkey_mask     = cpu_to_be64(MLX5_MKEY_MASK_FREE);

        dseg->lkey = sq->mkey_be;
        dseg->addr = cpu_to_be64(wi->umr.mtt_addr);
}

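/* Allocate per-WQE MPW bookkeeping: one mlx5e_mpw_info per RQ entry,
 * each with an MLX5_UMR_ALIGN-aligned MTT array carved out of a single
 * over-sized buffer and DMA-mapped towards the device, plus a pre-built
 * UMR WQE ready to post.
 */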
static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq,
                                     struct mlx5e_channel *c)
{
        int wq_sz = mlx5_wq_ll_get_size(&rq->wq);
        int mtt_sz = mlx5e_get_wqe_mtt_sz();
        int mtt_alloc = mtt_sz + MLX5_UMR_ALIGN - 1;
        int node = mlx5e_get_node(c->priv, c->ix);
        int i;

        rq->mpwqe.info = kzalloc_node(wq_sz * sizeof(*rq->mpwqe.info),
                                        GFP_KERNEL, node);
        if (!rq->mpwqe.info)
                goto err_out;

        /* We allocate more than mtt_sz as we will align the pointer */
        rq->mpwqe.mtt_no_align = kzalloc_node(mtt_alloc * wq_sz,
                                        GFP_KERNEL, node);
        if (unlikely(!rq->mpwqe.mtt_no_align))
                goto err_free_wqe_info;

        for (i = 0; i < wq_sz; i++) {
                struct mlx5e_mpw_info *wi = &rq->mpwqe.info[i];

                wi->umr.mtt = PTR_ALIGN(rq->mpwqe.mtt_no_align + i * mtt_alloc,
                                        MLX5_UMR_ALIGN);
                wi->umr.mtt_addr = dma_map_single(c->pdev, wi->umr.mtt, mtt_sz,
                                                  PCI_DMA_TODEVICE);
                if (unlikely(dma_mapping_error(c->pdev, wi->umr.mtt_addr)))
                        goto err_unmap_mtts;

                mlx5e_build_umr_wqe(rq, &c->icosq, &wi->umr.wqe, i);
        }

        return 0;

err_unmap_mtts:
        while (--i >= 0) {
                struct mlx5e_mpw_info *wi = &rq->mpwqe.info[i];

                dma_unmap_single(c->pdev, wi->umr.mtt_addr, mtt_sz,
                                 PCI_DMA_TODEVICE);
        }
        kfree(rq->mpwqe.mtt_no_align);
err_free_wqe_info:
        kfree(rq->mpwqe.info);

err_out:
        return -ENOMEM;
}

static void mlx5e_rq_free_mpwqe_info(struct mlx5e_rq *rq)
{
        int wq_sz = mlx5_wq_ll_get_size(&rq->wq);
        int mtt_sz = mlx5e_get_wqe_mtt_sz();
        int i;

        for (i = 0; i < wq_sz; i++) {
                struct mlx5e_mpw_info *wi = &rq->mpwqe.info[i];

                dma_unmap_single(rq->pdev, wi->umr.mtt_addr, mtt_sz,
                                 PCI_DMA_TODEVICE);
        }
        kfree(rq->mpwqe.mtt_no_align);
        kfree(rq->mpwqe.info);
}

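/* Create the UMR-capable mkey backing the striding RQ: it starts in the
 * "free" state and is (re)populated at run time by UMR WQEs, covering
 * npages translations of 2^page_shift bytes each.
 */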
static int mlx5e_create_umr_mkey(struct mlx5_core_dev *mdev,
                                 u64 npages, u8 page_shift,
                                 struct mlx5_core_mkey *umr_mkey)
{
        int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
        void *mkc;
        u32 *in;
        int err;

        if (!MLX5E_VALID_NUM_MTTS(npages))
                return -EINVAL;

        in = kvzalloc(inlen, GFP_KERNEL);
        if (!in)
                return -ENOMEM;

        mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

        MLX5_SET(mkc, mkc, free, 1);
        MLX5_SET(mkc, mkc, umr_en, 1);
        MLX5_SET(mkc, mkc, lw, 1);
        MLX5_SET(mkc, mkc, lr, 1);
        MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_MTT);

        MLX5_SET(mkc, mkc, qpn, 0xffffff);
        MLX5_SET(mkc, mkc, pd, mdev->mlx5e_res.pdn);
        MLX5_SET64(mkc, mkc, len, npages << page_shift);
        MLX5_SET(mkc, mkc, translations_octword_size,
                 MLX5_MTT_OCTW(npages));
        MLX5_SET(mkc, mkc, log_page_size, page_shift);

        err = mlx5_core_create_mkey(mdev, umr_mkey, in, inlen);

        kvfree(in);
        return err;
}

static int mlx5e_create_rq_umr_mkey(struct mlx5_core_dev *mdev, struct mlx5e_rq *rq)
{
        u64 num_mtts = MLX5E_REQUIRED_MTTS(mlx5_wq_ll_get_size(&rq->wq));

        return mlx5e_create_umr_mkey(mdev, num_mtts, PAGE_SHIFT, &rq->umr_mkey);
}

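/* Allocate the software side of an RQ on the channel's NUMA node and
 * wire up its datapath handlers.  The two WQ types diverge here:
 * striding RQ gets the MPWQE post/dealloc/CQE handlers and a UMR mkey,
 * while the linked-list RQ sizes its fragments from the MTU (or the
 * LRO WQE size) and keeps per-WQE frag_info instead.
 */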
static int mlx5e_alloc_rq(struct mlx5e_channel *c,
                          struct mlx5e_params *params,
                          struct mlx5e_rq_param *rqp,
                          struct mlx5e_rq *rq)
{
        struct mlx5_core_dev *mdev = c->mdev;
        void *rqc = rqp->rqc;
        void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
        u32 byte_count;
        int npages;
        int wq_sz;
        int err;
        int i;

        rqp->wq.db_numa_node = mlx5e_get_node(c->priv, c->ix);

        err = mlx5_wq_ll_create(mdev, &rqp->wq, rqc_wq, &rq->wq,
                                &rq->wq_ctrl);
        if (err)
                return err;

        rq->wq.db = &rq->wq.db[MLX5_RCV_DBR];

        wq_sz = mlx5_wq_ll_get_size(&rq->wq);

        rq->wq_type = params->rq_wq_type;
        rq->pdev    = c->pdev;
        rq->netdev  = c->netdev;
        rq->tstamp  = c->tstamp;
        rq->clock   = &mdev->clock;
        rq->channel = c;
        rq->ix      = c->ix;
        rq->mdev    = mdev;

        rq->xdp_prog = params->xdp_prog ? bpf_prog_inc(params->xdp_prog) : NULL;
        if (IS_ERR(rq->xdp_prog)) {
                err = PTR_ERR(rq->xdp_prog);
                rq->xdp_prog = NULL;
                goto err_rq_wq_destroy;
        }

        rq->buff.map_dir = rq->xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
        rq->buff.headroom = params->rq_headroom;

        switch (rq->wq_type) {
        case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:

                rq->post_wqes = mlx5e_post_rx_mpwqes;
                rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;

                rq->handle_rx_cqe = c->priv->profile->rx_handlers.handle_rx_cqe_mpwqe;
#ifdef CONFIG_MLX5_EN_IPSEC
                if (MLX5_IPSEC_DEV(mdev)) {
                        err = -EINVAL;
                        netdev_err(c->netdev, "MPWQE RQ with IPSec offload not supported\n");
                        goto err_rq_wq_destroy;
                }
#endif
                if (!rq->handle_rx_cqe) {
                        err = -EINVAL;
                        netdev_err(c->netdev, "RX handler of MPWQE RQ is not set, err %d\n", err);
                        goto err_rq_wq_destroy;
                }

                rq->mpwqe.log_stride_sz = params->mpwqe_log_stride_sz;
                rq->mpwqe.num_strides = BIT(params->mpwqe_log_num_strides);

                byte_count = rq->mpwqe.num_strides << rq->mpwqe.log_stride_sz;

                err = mlx5e_create_rq_umr_mkey(mdev, rq);
                if (err)
                        goto err_rq_wq_destroy;
                rq->mkey_be = cpu_to_be32(rq->umr_mkey.key);

                err = mlx5e_rq_alloc_mpwqe_info(rq, c);
                if (err)
                        goto err_destroy_umr_mkey;
                break;
        default: /* MLX5_WQ_TYPE_LINKED_LIST */
                rq->wqe.frag_info =
                        kzalloc_node(wq_sz * sizeof(*rq->wqe.frag_info),
                                     GFP_KERNEL,
                                     mlx5e_get_node(c->priv, c->ix));
                if (!rq->wqe.frag_info) {
                        err = -ENOMEM;
                        goto err_rq_wq_destroy;
                }
                rq->post_wqes = mlx5e_post_rx_wqes;
                rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;

#ifdef CONFIG_MLX5_EN_IPSEC
                if (c->priv->ipsec)
                        rq->handle_rx_cqe = mlx5e_ipsec_handle_rx_cqe;
                else
#endif
                        rq->handle_rx_cqe = c->priv->profile->rx_handlers.handle_rx_cqe;
                if (!rq->handle_rx_cqe) {
                        kfree(rq->wqe.frag_info);
                        err = -EINVAL;
                        netdev_err(c->netdev, "RX handler of RQ is not set, err %d\n", err);
                        goto err_rq_wq_destroy;
                }

                byte_count = params->lro_en  ?
                                params->lro_wqe_sz :
                                MLX5E_SW2HW_MTU(c->priv, c->netdev->mtu);
#ifdef CONFIG_MLX5_EN_IPSEC
                if (MLX5_IPSEC_DEV(mdev))
                        byte_count += MLX5E_METADATA_ETHER_LEN;
#endif
                rq->wqe.page_reuse = !params->xdp_prog && !params->lro_en;

                /* calc the required page order */
                rq->wqe.frag_sz = MLX5_SKB_FRAG_SZ(rq->buff.headroom + byte_count);
                npages = DIV_ROUND_UP(rq->wqe.frag_sz, PAGE_SIZE);
                rq->buff.page_order = order_base_2(npages);

                byte_count |= MLX5_HW_START_PADDING;
                rq->mkey_be = c->mkey_be;
        }

        for (i = 0; i < wq_sz; i++) {
                struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i);

                if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
                        u64 dma_offset = (u64)mlx5e_get_wqe_mtt_offset(rq, i) << PAGE_SHIFT;

                        wqe->data.addr = cpu_to_be64(dma_offset);
                }

                wqe->data.byte_count = cpu_to_be32(byte_count);
                wqe->data.lkey = rq->mkey_be;
        }

        INIT_WORK(&rq->am.work, mlx5e_rx_am_work);
        rq->am.mode = params->rx_cq_period_mode;
        rq->page_cache.head = 0;
        rq->page_cache.tail = 0;

        return 0;

err_destroy_umr_mkey:
        mlx5_core_destroy_mkey(mdev, &rq->umr_mkey);

err_rq_wq_destroy:
        if (rq->xdp_prog)
                bpf_prog_put(rq->xdp_prog);
        mlx5_wq_destroy(&rq->wq_ctrl);

        return err;
}

static void mlx5e_free_rq(struct mlx5e_rq *rq)
{
        int i;

        if (rq->xdp_prog)
                bpf_prog_put(rq->xdp_prog);

        switch (rq->wq_type) {
        case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
                mlx5e_rq_free_mpwqe_info(rq);
                mlx5_core_destroy_mkey(rq->mdev, &rq->umr_mkey);
                break;
        default: /* MLX5_WQ_TYPE_LINKED_LIST */
                kfree(rq->wqe.frag_info);
        }

        for (i = rq->page_cache.head; i != rq->page_cache.tail;
             i = (i + 1) & (MLX5E_CACHE_SIZE - 1)) {
                struct mlx5e_dma_info *dma_info = &rq->page_cache.page_cache[i];

                mlx5e_page_release(rq, dma_info, false);
        }
        mlx5_wq_destroy(&rq->wq_ctrl);
}

static int mlx5e_create_rq(struct mlx5e_rq *rq,
                           struct mlx5e_rq_param *param)
{
        struct mlx5_core_dev *mdev = rq->mdev;

        void *in;
        void *rqc;
        void *wq;
        int inlen;
        int err;

        inlen = MLX5_ST_SZ_BYTES(create_rq_in) +
                sizeof(u64) * rq->wq_ctrl.buf.npages;
        in = kvzalloc(inlen, GFP_KERNEL);
        if (!in)
                return -ENOMEM;

        rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
        wq  = MLX5_ADDR_OF(rqc, rqc, wq);

        memcpy(rqc, param->rqc, sizeof(param->rqc));

        MLX5_SET(rqc,  rqc, cqn,                rq->cq.mcq.cqn);
        MLX5_SET(rqc,  rqc, state,              MLX5_RQC_STATE_RST);
        MLX5_SET(wq,   wq,  log_wq_pg_sz,       rq->wq_ctrl.buf.page_shift -
                                                MLX5_ADAPTER_PAGE_SHIFT);
        MLX5_SET64(wq, wq,  dbr_addr,           rq->wq_ctrl.db.dma);

        mlx5_fill_page_array(&rq->wq_ctrl.buf,
                             (__be64 *)MLX5_ADDR_OF(wq, wq, pas));

        err = mlx5_core_create_rq(mdev, in, inlen, &rq->rqn);

        kvfree(in);

        return err;
}

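/* RQ state changes (e.g. RST -> RDY when opening) go through the
 * MODIFY_RQ command; the caller passes both the current and the target
 * state so firmware can validate the transition.
 */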
static int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state,
                                 int next_state)
{
        struct mlx5e_channel *c = rq->channel;
        struct mlx5_core_dev *mdev = c->mdev;

        void *in;
        void *rqc;
        int inlen;
        int err;

        inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
        in = kvzalloc(inlen, GFP_KERNEL);
        if (!in)
                return -ENOMEM;

        rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);

        MLX5_SET(modify_rq_in, in, rq_state, curr_state);
        MLX5_SET(rqc, rqc, state, next_state);

        err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);

        kvfree(in);

        return err;
}

static int mlx5e_modify_rq_scatter_fcs(struct mlx5e_rq *rq, bool enable)
{
        struct mlx5e_channel *c = rq->channel;
        struct mlx5e_priv *priv = c->priv;
        struct mlx5_core_dev *mdev = priv->mdev;

        void *in;
        void *rqc;
        int inlen;
        int err;

        inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
        in = kvzalloc(inlen, GFP_KERNEL);
        if (!in)
                return -ENOMEM;

        rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);

        MLX5_SET(modify_rq_in, in, rq_state, MLX5_RQC_STATE_RDY);
        MLX5_SET64(modify_rq_in, in, modify_bitmask,
                   MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_SCATTER_FCS);
        MLX5_SET(rqc, rqc, scatter_fcs, enable);
        MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY);

        err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);

        kvfree(in);

        return err;
}

static int mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd)
{
        struct mlx5e_channel *c = rq->channel;
        struct mlx5_core_dev *mdev = c->mdev;
        void *in;
        void *rqc;
        int inlen;
        int err;

        inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
        in = kvzalloc(inlen, GFP_KERNEL);
        if (!in)
                return -ENOMEM;

        rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);

        MLX5_SET(modify_rq_in, in, rq_state, MLX5_RQC_STATE_RDY);
        MLX5_SET64(modify_rq_in, in, modify_bitmask,
                   MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD);
        MLX5_SET(rqc, rqc, vsd, vsd);
        MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY);

        err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);

        kvfree(in);

        return err;
}

static void mlx5e_destroy_rq(struct mlx5e_rq *rq)
{
        mlx5_core_destroy_rq(rq->mdev, rq->rqn);
}

static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq)
{
        unsigned long exp_time = jiffies + msecs_to_jiffies(20000);
        struct mlx5e_channel *c = rq->channel;

        struct mlx5_wq_ll *wq = &rq->wq;
        u16 min_wqes = mlx5_min_rx_wqes(rq->wq_type, mlx5_wq_ll_get_size(wq));

        while (time_before(jiffies, exp_time)) {
                if (wq->cur_sz >= min_wqes)
                        return 0;

                msleep(20);
        }

        netdev_warn(c->netdev, "Failed to get min RX wqes on RQN[0x%x] wq cur_sz(%d) min_rx_wqes(%d)\n",
                    rq->rqn, wq->cur_sz, min_wqes);
        return -ETIMEDOUT;
}

static void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
{
        struct mlx5_wq_ll *wq = &rq->wq;
        struct mlx5e_rx_wqe *wqe;
        __be16 wqe_ix_be;
        u16 wqe_ix;

        /* UMR WQE (if in progress) is always at wq->head */
        if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ &&
            rq->mpwqe.umr_in_progress)
                mlx5e_free_rx_mpwqe(rq, &rq->mpwqe.info[wq->head]);

        while (!mlx5_wq_ll_is_empty(wq)) {
                wqe_ix_be = *wq->tail_next;
                wqe_ix    = be16_to_cpu(wqe_ix_be);
                wqe       = mlx5_wq_ll_get_wqe(&rq->wq, wqe_ix);
                rq->dealloc_wqe(rq, wqe_ix);
                mlx5_wq_ll_pop(&rq->wq, wqe_ix_be,
                               &wqe->next.next_wqe_index);
        }

        if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST && rq->wqe.page_reuse) {
                /* Clean outstanding pages on handled WQEs that decided to do page-reuse,
                 * but yet to be re-posted.
                 */
                int wq_sz = mlx5_wq_ll_get_size(&rq->wq);

                for (wqe_ix = 0; wqe_ix < wq_sz; wqe_ix++)
                        rq->dealloc_wqe(rq, wqe_ix);
        }
}

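/* Opening an RQ is a three-step sequence: allocate the software RQ,
 * create the firmware RQ object, then move it RST -> RDY so it can
 * start receiving.  Each step unwinds the previous ones on failure.
 */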
static int mlx5e_open_rq(struct mlx5e_channel *c,
                         struct mlx5e_params *params,
                         struct mlx5e_rq_param *param,
                         struct mlx5e_rq *rq)
{
        int err;

        err = mlx5e_alloc_rq(c, params, param, rq);
        if (err)
                return err;

        err = mlx5e_create_rq(rq, param);
        if (err)
                goto err_free_rq;

        err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
        if (err)
                goto err_destroy_rq;

        if (params->rx_am_enabled)
                c->rq.state |= BIT(MLX5E_RQ_STATE_AM);

        return 0;

err_destroy_rq:
        mlx5e_destroy_rq(rq);
err_free_rq:
        mlx5e_free_rq(rq);

        return err;
}

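/* Activation marks the RQ enabled and rings the ICOSQ doorbell with a
 * NOP; the completion it generates kicks NAPI, which begins posting
 * receive WQEs via rq->post_wqes.
 */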
static void mlx5e_activate_rq(struct mlx5e_rq *rq)
{
        struct mlx5e_icosq *sq = &rq->channel->icosq;
        u16 pi = sq->pc & sq->wq.sz_m1;
        struct mlx5e_tx_wqe *nopwqe;

        set_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
        sq->db.ico_wqe[pi].opcode     = MLX5_OPCODE_NOP;
        nopwqe = mlx5e_post_nop(&sq->wq, sq->sqn, &sq->pc);
        mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &nopwqe->ctrl);
}

static void mlx5e_deactivate_rq(struct mlx5e_rq *rq)
{
        clear_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
        napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */
}

static void mlx5e_close_rq(struct mlx5e_rq *rq)
{
        cancel_work_sync(&rq->am.work);
        mlx5e_destroy_rq(rq);
        mlx5e_free_rx_descs(rq);
        mlx5e_free_rq(rq);
}

static void mlx5e_free_xdpsq_db(struct mlx5e_xdpsq *sq)
{
        kfree(sq->db.di);
}

static int mlx5e_alloc_xdpsq_db(struct mlx5e_xdpsq *sq, int numa)
{
        int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);

        sq->db.di = kzalloc_node(sizeof(*sq->db.di) * wq_sz,
                                     GFP_KERNEL, numa);
        if (!sq->db.di) {
                mlx5e_free_xdpsq_db(sq);
                return -ENOMEM;
        }

        return 0;
}

static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c,
                             struct mlx5e_params *params,
                             struct mlx5e_sq_param *param,
                             struct mlx5e_xdpsq *sq)
{
        void *sqc_wq               = MLX5_ADDR_OF(sqc, param->sqc, wq);
        struct mlx5_core_dev *mdev = c->mdev;
        int err;

        sq->pdev      = c->pdev;
        sq->mkey_be   = c->mkey_be;
        sq->channel   = c;
        sq->uar_map   = mdev->mlx5e_res.bfreg.map;
        sq->min_inline_mode = params->tx_min_inline_mode;

        param->wq.db_numa_node = mlx5e_get_node(c->priv, c->ix);
        err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl);
        if (err)
                return err;
        sq->wq.db = &sq->wq.db[MLX5_SND_DBR];

        err = mlx5e_alloc_xdpsq_db(sq, mlx5e_get_node(c->priv, c->ix));
        if (err)
                goto err_sq_wq_destroy;

        return 0;

err_sq_wq_destroy:
        mlx5_wq_destroy(&sq->wq_ctrl);

        return err;
}

static void mlx5e_free_xdpsq(struct mlx5e_xdpsq *sq)
{
        mlx5e_free_xdpsq_db(sq);
        mlx5_wq_destroy(&sq->wq_ctrl);
}

static void mlx5e_free_icosq_db(struct mlx5e_icosq *sq)
{
        kfree(sq->db.ico_wqe);
}

static int mlx5e_alloc_icosq_db(struct mlx5e_icosq *sq, int numa)
{
        u8 wq_sz = mlx5_wq_cyc_get_size(&sq->wq);

        sq->db.ico_wqe = kzalloc_node(sizeof(*sq->db.ico_wqe) * wq_sz,
                                      GFP_KERNEL, numa);
        if (!sq->db.ico_wqe)
                return -ENOMEM;

        return 0;
}

static int mlx5e_alloc_icosq(struct mlx5e_channel *c,
                             struct mlx5e_sq_param *param,
                             struct mlx5e_icosq *sq)
{
        void *sqc_wq               = MLX5_ADDR_OF(sqc, param->sqc, wq);
        struct mlx5_core_dev *mdev = c->mdev;
        int err;

        sq->mkey_be   = c->mkey_be;
        sq->channel   = c;
        sq->uar_map   = mdev->mlx5e_res.bfreg.map;

        param->wq.db_numa_node = mlx5e_get_node(c->priv, c->ix);
        err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl);
        if (err)
                return err;
        sq->wq.db = &sq->wq.db[MLX5_SND_DBR];

        err = mlx5e_alloc_icosq_db(sq, mlx5e_get_node(c->priv, c->ix));
        if (err)
                goto err_sq_wq_destroy;

        sq->edge = (sq->wq.sz_m1 + 1) - MLX5E_ICOSQ_MAX_WQEBBS;

        return 0;

err_sq_wq_destroy:
        mlx5_wq_destroy(&sq->wq_ctrl);

        return err;
}

static void mlx5e_free_icosq(struct mlx5e_icosq *sq)
{
        mlx5e_free_icosq_db(sq);
        mlx5_wq_destroy(&sq->wq_ctrl);
}

static void mlx5e_free_txqsq_db(struct mlx5e_txqsq *sq)
{
        kfree(sq->db.wqe_info);
        kfree(sq->db.dma_fifo);
}

static int mlx5e_alloc_txqsq_db(struct mlx5e_txqsq *sq, int numa)
{
        int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
        int df_sz = wq_sz * MLX5_SEND_WQEBB_NUM_DS;

        sq->db.dma_fifo = kzalloc_node(df_sz * sizeof(*sq->db.dma_fifo),
                                           GFP_KERNEL, numa);
        sq->db.wqe_info = kzalloc_node(wq_sz * sizeof(*sq->db.wqe_info),
                                           GFP_KERNEL, numa);
        if (!sq->db.dma_fifo || !sq->db.wqe_info) {
                mlx5e_free_txqsq_db(sq);
                return -ENOMEM;
        }

        sq->dma_fifo_mask = df_sz - 1;

        return 0;
}

static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
                             int txq_ix,
                             struct mlx5e_params *params,
                             struct mlx5e_sq_param *param,
                             struct mlx5e_txqsq *sq)
{
        void *sqc_wq               = MLX5_ADDR_OF(sqc, param->sqc, wq);
        struct mlx5_core_dev *mdev = c->mdev;
        int err;

        sq->pdev      = c->pdev;
        sq->tstamp    = c->tstamp;
        sq->clock     = &mdev->clock;
        sq->mkey_be   = c->mkey_be;
        sq->channel   = c;
        sq->txq_ix    = txq_ix;
        sq->uar_map   = mdev->mlx5e_res.bfreg.map;
        sq->max_inline      = params->tx_max_inline;
        sq->min_inline_mode = params->tx_min_inline_mode;
        if (MLX5_IPSEC_DEV(c->priv->mdev))
                set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state);

        param->wq.db_numa_node = mlx5e_get_node(c->priv, c->ix);
        err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl);
        if (err)
                return err;
        sq->wq.db    = &sq->wq.db[MLX5_SND_DBR];

        err = mlx5e_alloc_txqsq_db(sq, mlx5e_get_node(c->priv, c->ix));
        if (err)
                goto err_sq_wq_destroy;

        sq->edge = (sq->wq.sz_m1 + 1) - MLX5_SEND_WQE_MAX_WQEBBS;

        return 0;

err_sq_wq_destroy:
        mlx5_wq_destroy(&sq->wq_ctrl);

        return err;
}

static void mlx5e_free_txqsq(struct mlx5e_txqsq *sq)
{
        mlx5e_free_txqsq_db(sq);
        mlx5_wq_destroy(&sq->wq_ctrl);
}

struct mlx5e_create_sq_param {
        struct mlx5_wq_ctrl        *wq_ctrl;
        u32                         cqn;
        u32                         tisn;
        u8                          tis_lst_sz;
        u8                          min_inline_mode;
};

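/* Create the firmware SQ object.  The command carries the SQC copied
 * from the channel params, the CQN/TISN wiring from csp, and the page
 * array (PAS) of the already-allocated work queue buffer, which is why
 * inlen is the base command size plus one u64 per WQ page.
 */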
static int mlx5e_create_sq(struct mlx5_core_dev *mdev,
                           struct mlx5e_sq_param *param,
                           struct mlx5e_create_sq_param *csp,
                           u32 *sqn)
{
        void *in;
        void *sqc;
        void *wq;
        int inlen;
        int err;

        inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
                sizeof(u64) * csp->wq_ctrl->buf.npages;
        in = kvzalloc(inlen, GFP_KERNEL);
        if (!in)
                return -ENOMEM;

        sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
        wq = MLX5_ADDR_OF(sqc, sqc, wq);

        memcpy(sqc, param->sqc, sizeof(param->sqc));
        MLX5_SET(sqc,  sqc, tis_lst_sz, csp->tis_lst_sz);
        MLX5_SET(sqc,  sqc, tis_num_0, csp->tisn);
        MLX5_SET(sqc,  sqc, cqn, csp->cqn);

        if (MLX5_CAP_ETH(mdev, wqe_inline_mode) == MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
                MLX5_SET(sqc,  sqc, min_wqe_inline_mode, csp->min_inline_mode);

        MLX5_SET(sqc,  sqc, state, MLX5_SQC_STATE_RST);

        MLX5_SET(wq,   wq, wq_type,       MLX5_WQ_TYPE_CYCLIC);
        MLX5_SET(wq,   wq, uar_page,      mdev->mlx5e_res.bfreg.index);
        MLX5_SET(wq,   wq, log_wq_pg_sz,  csp->wq_ctrl->buf.page_shift -
                                          MLX5_ADAPTER_PAGE_SHIFT);
        MLX5_SET64(wq, wq, dbr_addr,      csp->wq_ctrl->db.dma);

        mlx5_fill_page_array(&csp->wq_ctrl->buf, (__be64 *)MLX5_ADDR_OF(wq, wq, pas));

        err = mlx5_core_create_sq(mdev, in, inlen, sqn);

        kvfree(in);

        return err;
}

struct mlx5e_modify_sq_param {
        int curr_state;
        int next_state;
        bool rl_update;
        int rl_index;
};

static int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn,
                           struct mlx5e_modify_sq_param *p)
{
        void *in;
        void *sqc;
        int inlen;
        int err;

        inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
        in = kvzalloc(inlen, GFP_KERNEL);
        if (!in)
                return -ENOMEM;

        sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);

        MLX5_SET(modify_sq_in, in, sq_state, p->curr_state);
        MLX5_SET(sqc, sqc, state, p->next_state);
        if (p->rl_update && p->next_state == MLX5_SQC_STATE_RDY) {
                MLX5_SET64(modify_sq_in, in, modify_bitmask, 1);
                MLX5_SET(sqc,  sqc, packet_pacing_rate_limit_index, p->rl_index);
        }

        err = mlx5_core_modify_sq(mdev, sqn, in, inlen);

        kvfree(in);

        return err;
}

static void mlx5e_destroy_sq(struct mlx5_core_dev *mdev, u32 sqn)
{
        mlx5_core_destroy_sq(mdev, sqn);
}

static int mlx5e_create_sq_rdy(struct mlx5_core_dev *mdev,
                               struct mlx5e_sq_param *param,
                               struct mlx5e_create_sq_param *csp,
                               u32 *sqn)
{
        struct mlx5e_modify_sq_param msp = {0};
        int err;

        err = mlx5e_create_sq(mdev, param, csp, sqn);
        if (err)
                return err;

        msp.curr_state = MLX5_SQC_STATE_RST;
        msp.next_state = MLX5_SQC_STATE_RDY;
        err = mlx5e_modify_sq(mdev, *sqn, &msp);
        if (err)
                mlx5e_destroy_sq(mdev, *sqn);

        return err;
}

static int mlx5e_set_sq_maxrate(struct net_device *dev,
                                struct mlx5e_txqsq *sq, u32 rate);

static int mlx5e_open_txqsq(struct mlx5e_channel *c,
                            u32 tisn,
                            int txq_ix,
                            struct mlx5e_params *params,
                            struct mlx5e_sq_param *param,
                            struct mlx5e_txqsq *sq)
{
        struct mlx5e_create_sq_param csp = {};
        u32 tx_rate;
        int err;

        err = mlx5e_alloc_txqsq(c, txq_ix, params, param, sq);
        if (err)
                return err;

        csp.tisn            = tisn;
        csp.tis_lst_sz      = 1;
        csp.cqn             = sq->cq.mcq.cqn;
        csp.wq_ctrl         = &sq->wq_ctrl;
        csp.min_inline_mode = sq->min_inline_mode;
        err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn);
        if (err)
                goto err_free_txqsq;

        tx_rate = c->priv->tx_rates[sq->txq_ix];
        if (tx_rate)
                mlx5e_set_sq_maxrate(c->netdev, sq, tx_rate);

        return 0;

err_free_txqsq:
        clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
        mlx5e_free_txqsq(sq);

        return err;
}

static void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq)
{
        sq->txq = netdev_get_tx_queue(sq->channel->netdev, sq->txq_ix);
        set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
        netdev_tx_reset_queue(sq->txq);
        netif_tx_start_queue(sq->txq);
}

static inline void netif_tx_disable_queue(struct netdev_queue *txq)
{
        __netif_tx_lock_bh(txq);
        netif_tx_stop_queue(txq);
        __netif_tx_unlock_bh(txq);
}

static void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq)
{
        struct mlx5e_channel *c = sq->channel;

        clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
        /* prevent netif_tx_wake_queue */
        napi_synchronize(&c->napi);

        netif_tx_disable_queue(sq->txq);

        /* last doorbell out, godspeed .. */
        if (mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, 1)) {
                struct mlx5e_tx_wqe *nop;

                sq->db.wqe_info[(sq->pc & sq->wq.sz_m1)].skb = NULL;
                nop = mlx5e_post_nop(&sq->wq, sq->sqn, &sq->pc);
                mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &nop->ctrl);
        }
}

static void mlx5e_close_txqsq(struct mlx5e_txqsq *sq)
{
        struct mlx5e_channel *c = sq->channel;
        struct mlx5_core_dev *mdev = c->mdev;

        mlx5e_destroy_sq(mdev, sq->sqn);
        if (sq->rate_limit)
                mlx5_rl_remove_rate(mdev, sq->rate_limit);
        mlx5e_free_txqsq_descs(sq);
        mlx5e_free_txqsq(sq);
}

static int mlx5e_open_icosq(struct mlx5e_channel *c,
                            struct mlx5e_params *params,
                            struct mlx5e_sq_param *param,
                            struct mlx5e_icosq *sq)
{
        struct mlx5e_create_sq_param csp = {};
        int err;

        err = mlx5e_alloc_icosq(c, param, sq);
        if (err)
                return err;

        csp.cqn             = sq->cq.mcq.cqn;
        csp.wq_ctrl         = &sq->wq_ctrl;
        csp.min_inline_mode = params->tx_min_inline_mode;
        set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
        err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn);
        if (err)
                goto err_free_icosq;

        return 0;

err_free_icosq:
        clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
        mlx5e_free_icosq(sq);

        return err;
}

static void mlx5e_close_icosq(struct mlx5e_icosq *sq)
{
        struct mlx5e_channel *c = sq->channel;

        clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
        napi_synchronize(&c->napi);

        mlx5e_destroy_sq(c->mdev, sq->sqn);
        mlx5e_free_icosq(sq);
}

static int mlx5e_open_xdpsq(struct mlx5e_channel *c,
                            struct mlx5e_params *params,
                            struct mlx5e_sq_param *param,
                            struct mlx5e_xdpsq *sq)
{
        unsigned int ds_cnt = MLX5E_XDP_TX_DS_COUNT;
        struct mlx5e_create_sq_param csp = {};
        unsigned int inline_hdr_sz = 0;
        int err;
        int i;

        err = mlx5e_alloc_xdpsq(c, params, param, sq);
        if (err)
                return err;

        csp.tis_lst_sz      = 1;
        csp.tisn            = c->priv->tisn[0]; /* tc = 0 */
        csp.cqn             = sq->cq.mcq.cqn;
        csp.wq_ctrl         = &sq->wq_ctrl;
        csp.min_inline_mode = sq->min_inline_mode;
        set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
        err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn);
        if (err)
                goto err_free_xdpsq;

        if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) {
                inline_hdr_sz = MLX5E_XDP_MIN_INLINE;
                ds_cnt++;
        }

        /* Pre initialize fixed WQE fields */
        for (i = 0; i < mlx5_wq_cyc_get_size(&sq->wq); i++) {
                struct mlx5e_tx_wqe      *wqe  = mlx5_wq_cyc_get_wqe(&sq->wq, i);
                struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
                struct mlx5_wqe_eth_seg  *eseg = &wqe->eth;
                struct mlx5_wqe_data_seg *dseg;

                cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
                eseg->inline_hdr.sz = cpu_to_be16(inline_hdr_sz);

                dseg = (struct mlx5_wqe_data_seg *)cseg + (ds_cnt - 1);
                dseg->lkey = sq->mkey_be;
        }

        return 0;

err_free_xdpsq:
        clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
        mlx5e_free_xdpsq(sq);

        return err;
}

static void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq)
{
        struct mlx5e_channel *c = sq->channel;

        clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
        napi_synchronize(&c->napi);

        mlx5e_destroy_sq(c->mdev, sq->sqn);
        mlx5e_free_xdpsq_descs(sq);
        mlx5e_free_xdpsq(sq);
}

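/* Common CQ setup: create the CQ work queue, resolve the IRQ for the
 * channel's EQ vector, and initialize every CQE's op_own to 0xf1
 * (invalid opcode, hardware ownership) so software polling never
 * mistakes a stale entry for a valid completion.
 */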
1460 static int mlx5e_alloc_cq_common(struct mlx5_core_dev *mdev,
1461                                  struct mlx5e_cq_param *param,
1462                                  struct mlx5e_cq *cq)
1463 {
1464         struct mlx5_core_cq *mcq = &cq->mcq;
1465         int eqn_not_used;
1466         unsigned int irqn;
1467         int err;
1468         u32 i;
1469
1470         err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
1471                                &cq->wq_ctrl);
1472         if (err)
1473                 return err;
1474
1475         mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);
1476
1477         mcq->cqe_sz     = 64;
1478         mcq->set_ci_db  = cq->wq_ctrl.db.db;
1479         mcq->arm_db     = cq->wq_ctrl.db.db + 1;
1480         *mcq->set_ci_db = 0;
1481         *mcq->arm_db    = 0;
1482         mcq->vector     = param->eq_ix;
1483         mcq->comp       = mlx5e_completion_event;
1484         mcq->event      = mlx5e_cq_error_event;
1485         mcq->irqn       = irqn;
1486
1487         for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
1488                 struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);
1489
1490                 cqe->op_own = 0xf1;
1491         }
1492
1493         cq->mdev = mdev;
1494
1495         return 0;
1496 }
1497
1498 static int mlx5e_alloc_cq(struct mlx5e_channel *c,
1499                           struct mlx5e_cq_param *param,
1500                           struct mlx5e_cq *cq)
1501 {
1502         struct mlx5_core_dev *mdev = c->priv->mdev;
1503         int err;
1504
1505         param->wq.buf_numa_node = mlx5e_get_node(c->priv, c->ix);
1506         param->wq.db_numa_node  = mlx5e_get_node(c->priv, c->ix);
1507         param->eq_ix   = c->ix;
1508
1509         err = mlx5e_alloc_cq_common(mdev, param, cq);
1510
1511         cq->napi    = &c->napi;
1512         cq->channel = c;
1513
1514         return err;
1515 }
1516
1517 static void mlx5e_free_cq(struct mlx5e_cq *cq)
1518 {
1519         mlx5_cqwq_destroy(&cq->wq_ctrl);
1520 }
1521
1522 static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
1523 {
1524         struct mlx5_core_dev *mdev = cq->mdev;
1525         struct mlx5_core_cq *mcq = &cq->mcq;
1526
1527         void *in;
1528         void *cqc;
1529         int inlen;
1530         unsigned int irqn_not_used;
1531         int eqn;
1532         int err;
1533
1534         inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
1535                 sizeof(u64) * cq->wq_ctrl.frag_buf.npages;
1536         in = kvzalloc(inlen, GFP_KERNEL);
1537         if (!in)
1538                 return -ENOMEM;
1539
1540         cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
1541
1542         memcpy(cqc, param->cqc, sizeof(param->cqc));
1543
1544         mlx5_fill_page_frag_array(&cq->wq_ctrl.frag_buf,
1545                                   (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));
1546
1547         mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);
1548
1549         MLX5_SET(cqc,   cqc, cq_period_mode, param->cq_period_mode);
1550         MLX5_SET(cqc,   cqc, c_eqn,         eqn);
1551         MLX5_SET(cqc,   cqc, uar_page,      mdev->priv.uar->index);
1552         MLX5_SET(cqc,   cqc, log_page_size, cq->wq_ctrl.frag_buf.page_shift -
1553                                             MLX5_ADAPTER_PAGE_SHIFT);
1554         MLX5_SET64(cqc, cqc, dbr_addr,      cq->wq_ctrl.db.dma);
1555
1556         err = mlx5_core_create_cq(mdev, mcq, in, inlen);
1557
1558         kvfree(in);
1559
1560         if (err)
1561                 return err;
1562
1563         mlx5e_cq_arm(cq);
1564
1565         return 0;
1566 }
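/* The create command is variable-length: the fixed create_cq_in layout
 * is followed by one 64-bit physical address ("pas" entry) per page of
 * the CQ buffer, which is why the input length is computed as
 *
 *     inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
 *             sizeof(u64) * cq->wq_ctrl.frag_buf.npages;
 *
 * kvzalloc() is used because a large page count can push the command
 * past what kmalloc() will reliably provide.
 */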
1567
1568 static void mlx5e_destroy_cq(struct mlx5e_cq *cq)
1569 {
1570         mlx5_core_destroy_cq(cq->mdev, &cq->mcq);
1571 }
1572
1573 static int mlx5e_open_cq(struct mlx5e_channel *c,
1574                          struct mlx5e_cq_moder moder,
1575                          struct mlx5e_cq_param *param,
1576                          struct mlx5e_cq *cq)
1577 {
1578         struct mlx5_core_dev *mdev = c->mdev;
1579         int err;
1580
1581         err = mlx5e_alloc_cq(c, param, cq);
1582         if (err)
1583                 return err;
1584
1585         err = mlx5e_create_cq(cq, param);
1586         if (err)
1587                 goto err_free_cq;
1588
1589         if (MLX5_CAP_GEN(mdev, cq_moderation))
1590                 mlx5_core_modify_cq_moderation(mdev, &cq->mcq, moder.usec, moder.pkts);
1591         return 0;
1592
1593 err_free_cq:
1594         mlx5e_free_cq(cq);
1595
1596         return err;
1597 }
1598
1599 static void mlx5e_close_cq(struct mlx5e_cq *cq)
1600 {
1601         mlx5e_destroy_cq(cq);
1602         mlx5e_free_cq(cq);
1603 }
1604
1605 static int mlx5e_open_tx_cqs(struct mlx5e_channel *c,
1606                              struct mlx5e_params *params,
1607                              struct mlx5e_channel_param *cparam)
1608 {
1609         int err;
1610         int tc;
1611
1612         for (tc = 0; tc < c->num_tc; tc++) {
1613                 err = mlx5e_open_cq(c, params->tx_cq_moderation,
1614                                     &cparam->tx_cq, &c->sq[tc].cq);
1615                 if (err)
1616                         goto err_close_tx_cqs;
1617         }
1618
1619         return 0;
1620
1621 err_close_tx_cqs:
1622         for (tc--; tc >= 0; tc--)
1623                 mlx5e_close_cq(&c->sq[tc].cq);
1624
1625         return err;
1626 }
1627
1628 static void mlx5e_close_tx_cqs(struct mlx5e_channel *c)
1629 {
1630         int tc;
1631
1632         for (tc = 0; tc < c->num_tc; tc++)
1633                 mlx5e_close_cq(&c->sq[tc].cq);
1634 }
1635
1636 static int mlx5e_open_sqs(struct mlx5e_channel *c,
1637                           struct mlx5e_params *params,
1638                           struct mlx5e_channel_param *cparam)
1639 {
1640         int err;
1641         int tc;
1642
1643         for (tc = 0; tc < params->num_tc; tc++) {
1644                 int txq_ix = c->ix + tc * params->num_channels;
1645
1646                 err = mlx5e_open_txqsq(c, c->priv->tisn[tc], txq_ix,
1647                                        params, &cparam->sq, &c->sq[tc]);
1648                 if (err)
1649                         goto err_close_sqs;
1650         }
1651
1652         return 0;
1653
1654 err_close_sqs:
1655         for (tc--; tc >= 0; tc--)
1656                 mlx5e_close_txqsq(&c->sq[tc]);
1657
1658         return err;
1659 }
1660
1661 static void mlx5e_close_sqs(struct mlx5e_channel *c)
1662 {
1663         int tc;
1664
1665         for (tc = 0; tc < c->num_tc; tc++)
1666                 mlx5e_close_txqsq(&c->sq[tc]);
1667 }
1668
1669 static int mlx5e_set_sq_maxrate(struct net_device *dev,
1670                                 struct mlx5e_txqsq *sq, u32 rate)
1671 {
1672         struct mlx5e_priv *priv = netdev_priv(dev);
1673         struct mlx5_core_dev *mdev = priv->mdev;
1674         struct mlx5e_modify_sq_param msp = {0};
1675         u16 rl_index = 0;
1676         int err;
1677
1678         if (rate == sq->rate_limit)
1679                 /* nothing to do */
1680                 return 0;
1681
1682         if (sq->rate_limit)
1683                 /* remove the current rl index to free space for the next ones */
1684                 mlx5_rl_remove_rate(mdev, sq->rate_limit);
1685
1686         sq->rate_limit = 0;
1687
1688         if (rate) {
1689                 err = mlx5_rl_add_rate(mdev, rate, &rl_index);
1690                 if (err) {
1691                         netdev_err(dev, "Failed configuring rate %u: %d\n",
1692                                    rate, err);
1693                         return err;
1694                 }
1695         }
1696
1697         msp.curr_state = MLX5_SQC_STATE_RDY;
1698         msp.next_state = MLX5_SQC_STATE_RDY;
1699         msp.rl_index   = rl_index;
1700         msp.rl_update  = true;
1701         err = mlx5e_modify_sq(mdev, sq->sqn, &msp);
1702         if (err) {
1703                 netdev_err(dev, "Failed configuring rate %u: %d\n",
1704                            rate, err);
1705                 /* remove the rate from the table */
1706                 if (rate)
1707                         mlx5_rl_remove_rate(mdev, rate);
1708                 return err;
1709         }
1710
1711         sq->rate_limit = rate;
1712         return 0;
1713 }
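/* Rate limits live in a small device-global table and each SQ stores
 * only an index into it. The old rate is released before the new one is
 * added so that a full table never blocks merely changing an existing
 * limit, and if the MODIFY_SQ command fails the freshly added entry is
 * removed again to keep the table's reference counts balanced.
 */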
1714
1715 static int mlx5e_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
1716 {
1717         struct mlx5e_priv *priv = netdev_priv(dev);
1718         struct mlx5_core_dev *mdev = priv->mdev;
1719         struct mlx5e_txqsq *sq = priv->txq2sq[index];
1720         int err = 0;
1721
1722         if (!mlx5_rl_is_supported(mdev)) {
1723                 netdev_err(dev, "Rate limiting is not supported on this device\n");
1724                 return -EINVAL;
1725         }
1726
1727         /* rate is given in Mb/sec, HW config is in Kb/sec */
1728         rate = rate << 10;
1729
1730         /* Check whether the rate is in the valid range; 0 is always valid */
1731         if (rate && !mlx5_rl_is_in_range(mdev, rate)) {
1732                 netdev_err(dev, "TX rate %u is not in range\n", rate);
1733                 return -ERANGE;
1734         }
1735
1736         mutex_lock(&priv->state_lock);
1737         if (test_bit(MLX5E_STATE_OPENED, &priv->state))
1738                 err = mlx5e_set_sq_maxrate(dev, sq, rate);
1739         if (!err)
1740                 priv->tx_rates[index] = rate;
1741         mutex_unlock(&priv->state_lock);
1742
1743         return err;
1744 }
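/* Note that "rate << 10" multiplies by 1024, a power-of-two stand-in
 * for the nominal 1000 Kb per Mb, so the programmed limit sits about
 * 2.4% above the literal Mb/s request. The rate is also cached in
 * priv->tx_rates while the interface is closed, so it can be re-applied
 * when the SQs are recreated on the next open.
 */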
1745
1746 static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
1747                               struct mlx5e_params *params,
1748                               struct mlx5e_channel_param *cparam,
1749                               struct mlx5e_channel **cp)
1750 {
1751         struct mlx5e_cq_moder icocq_moder = {0, 0};
1752         struct net_device *netdev = priv->netdev;
1753         struct mlx5e_channel *c;
1754         unsigned int irq;
1755         int err;
1756         int eqn;
1757
1758         c = kzalloc_node(sizeof(*c), GFP_KERNEL, mlx5e_get_node(priv, ix));
1759         if (!c)
1760                 return -ENOMEM;
1761
1762         c->priv     = priv;
1763         c->mdev     = priv->mdev;
1764         c->tstamp   = &priv->tstamp;
1765         c->ix       = ix;
1766         c->pdev     = &priv->mdev->pdev->dev;
1767         c->netdev   = priv->netdev;
1768         c->mkey_be  = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key);
1769         c->num_tc   = params->num_tc;
1770         c->xdp      = !!params->xdp_prog;
1771
1772         mlx5_vector2eqn(priv->mdev, ix, &eqn, &irq);
1773         c->irq_desc = irq_to_desc(irq);
1774
1775         netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);
1776
1777         err = mlx5e_open_cq(c, icocq_moder, &cparam->icosq_cq, &c->icosq.cq);
1778         if (err)
1779                 goto err_napi_del;
1780
1781         err = mlx5e_open_tx_cqs(c, params, cparam);
1782         if (err)
1783                 goto err_close_icosq_cq;
1784
1785         err = mlx5e_open_cq(c, params->rx_cq_moderation, &cparam->rx_cq, &c->rq.cq);
1786         if (err)
1787                 goto err_close_tx_cqs;
1788
1789         /* XDP SQ CQ params are the same as normal TXQ SQ CQ params */
1790         err = c->xdp ? mlx5e_open_cq(c, params->tx_cq_moderation,
1791                                      &cparam->tx_cq, &c->rq.xdpsq.cq) : 0;
1792         if (err)
1793                 goto err_close_rx_cq;
1794
1795         napi_enable(&c->napi);
1796
1797         err = mlx5e_open_icosq(c, params, &cparam->icosq, &c->icosq);
1798         if (err)
1799                 goto err_disable_napi;
1800
1801         err = mlx5e_open_sqs(c, params, cparam);
1802         if (err)
1803                 goto err_close_icosq;
1804
1805         err = c->xdp ? mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, &c->rq.xdpsq) : 0;
1806         if (err)
1807                 goto err_close_sqs;
1808
1809         err = mlx5e_open_rq(c, params, &cparam->rq, &c->rq);
1810         if (err)
1811                 goto err_close_xdp_sq;
1812
1813         *cp = c;
1814
1815         return 0;
1816 err_close_xdp_sq:
1817         if (c->xdp)
1818                 mlx5e_close_xdpsq(&c->rq.xdpsq);
1819
1820 err_close_sqs:
1821         mlx5e_close_sqs(c);
1822
1823 err_close_icosq:
1824         mlx5e_close_icosq(&c->icosq);
1825
1826 err_disable_napi:
1827         napi_disable(&c->napi);
1828         if (c->xdp)
1829                 mlx5e_close_cq(&c->rq.xdpsq.cq);
1830
1831 err_close_rx_cq:
1832         mlx5e_close_cq(&c->rq.cq);
1833
1834 err_close_tx_cqs:
1835         mlx5e_close_tx_cqs(c);
1836
1837 err_close_icosq_cq:
1838         mlx5e_close_cq(&c->icosq.cq);
1839
1840 err_napi_del:
1841         netif_napi_del(&c->napi);
1842         kfree(c);
1843
1844         return err;
1845 }
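/* The error labels above mirror the open sequence exactly in reverse:
 * each goto target undoes the resources acquired before the failing
 * step and falls through into the next label. Keeping open order and
 * unwind order as mirror images is what makes partial-failure cleanup
 * complete; mlx5e_close_channel() below repeats the same sequence for
 * the fully constructed case.
 */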
1846
1847 static void mlx5e_activate_channel(struct mlx5e_channel *c)
1848 {
1849         int tc;
1850
1851         for (tc = 0; tc < c->num_tc; tc++)
1852                 mlx5e_activate_txqsq(&c->sq[tc]);
1853         mlx5e_activate_rq(&c->rq);
1854         netif_set_xps_queue(c->netdev,
1855                 mlx5_get_vector_affinity(c->priv->mdev, c->ix), c->ix);
1856 }
1857
1858 static void mlx5e_deactivate_channel(struct mlx5e_channel *c)
1859 {
1860         int tc;
1861
1862         mlx5e_deactivate_rq(&c->rq);
1863         for (tc = 0; tc < c->num_tc; tc++)
1864                 mlx5e_deactivate_txqsq(&c->sq[tc]);
1865 }
1866
1867 static void mlx5e_close_channel(struct mlx5e_channel *c)
1868 {
1869         mlx5e_close_rq(&c->rq);
1870         if (c->xdp)
1871                 mlx5e_close_xdpsq(&c->rq.xdpsq);
1872         mlx5e_close_sqs(c);
1873         mlx5e_close_icosq(&c->icosq);
1874         napi_disable(&c->napi);
1875         if (c->xdp)
1876                 mlx5e_close_cq(&c->rq.xdpsq.cq);
1877         mlx5e_close_cq(&c->rq.cq);
1878         mlx5e_close_tx_cqs(c);
1879         mlx5e_close_cq(&c->icosq.cq);
1880         netif_napi_del(&c->napi);
1881
1882         kfree(c);
1883 }
1884
1885 static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
1886                                  struct mlx5e_params *params,
1887                                  struct mlx5e_rq_param *param)
1888 {
1889         void *rqc = param->rqc;
1890         void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
1891
1892         switch (params->rq_wq_type) {
1893         case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
1894                 MLX5_SET(wq, wq, log_wqe_num_of_strides, params->mpwqe_log_num_strides - 9);
1895                 MLX5_SET(wq, wq, log_wqe_stride_size, params->mpwqe_log_stride_sz - 6);
1896                 MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ);
1897                 break;
1898         default: /* MLX5_WQ_TYPE_LINKED_LIST */
1899                 MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
1900         }
1901
1902         MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
1903         MLX5_SET(wq, wq, log_wq_stride,    ilog2(sizeof(struct mlx5e_rx_wqe)));
1904         MLX5_SET(wq, wq, log_wq_sz,        params->log_rq_size);
1905         MLX5_SET(wq, wq, pd,               priv->mdev->mlx5e_res.pdn);
1906         MLX5_SET(rqc, rqc, counter_set_id, priv->q_counter);
1907         MLX5_SET(rqc, rqc, vsd,            params->vlan_strip_disable);
1908         MLX5_SET(rqc, rqc, scatter_fcs,    params->scatter_fcs_en);
1909
1910         param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
1911         param->wq.linear = 1;
1912 }
1913
1914 static void mlx5e_build_drop_rq_param(struct mlx5e_rq_param *param)
1915 {
1916         void *rqc = param->rqc;
1917         void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
1918
1919         MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
1920         MLX5_SET(wq, wq, log_wq_stride,    ilog2(sizeof(struct mlx5e_rx_wqe)));
1921 }
1922
1923 static void mlx5e_build_sq_param_common(struct mlx5e_priv *priv,
1924                                         struct mlx5e_sq_param *param)
1925 {
1926         void *sqc = param->sqc;
1927         void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
1928
1929         MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
1930         MLX5_SET(wq, wq, pd,            priv->mdev->mlx5e_res.pdn);
1931
1932         param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
1933 }
1934
1935 static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
1936                                  struct mlx5e_params *params,
1937                                  struct mlx5e_sq_param *param)
1938 {
1939         void *sqc = param->sqc;
1940         void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
1941
1942         mlx5e_build_sq_param_common(priv, param);
1943         MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
1944         MLX5_SET(sqc, sqc, allow_swp, !!MLX5_IPSEC_DEV(priv->mdev));
1945 }
1946
1947 static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
1948                                         struct mlx5e_cq_param *param)
1949 {
1950         void *cqc = param->cqc;
1951
1952         MLX5_SET(cqc, cqc, uar_page, priv->mdev->priv.uar->index);
1953 }
1954
1955 static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
1956                                     struct mlx5e_params *params,
1957                                     struct mlx5e_cq_param *param)
1958 {
1959         void *cqc = param->cqc;
1960         u8 log_cq_size;
1961
1962         switch (params->rq_wq_type) {
1963         case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
1964                 log_cq_size = params->log_rq_size + params->mpwqe_log_num_strides;
1965                 break;
1966         default: /* MLX5_WQ_TYPE_LINKED_LIST */
1967                 log_cq_size = params->log_rq_size;
1968         }
1969
1970         MLX5_SET(cqc, cqc, log_cq_size, log_cq_size);
1971         if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
1972                 MLX5_SET(cqc, cqc, mini_cqe_res_format, MLX5_CQE_FORMAT_CSUM);
1973                 MLX5_SET(cqc, cqc, cqe_comp_en, 1);
1974         }
1975
1976         mlx5e_build_common_cq_param(priv, param);
1977         param->cq_period_mode = params->rx_cq_period_mode;
1978 }
1979
1980 static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
1981                                     struct mlx5e_params *params,
1982                                     struct mlx5e_cq_param *param)
1983 {
1984         void *cqc = param->cqc;
1985
1986         MLX5_SET(cqc, cqc, log_cq_size, params->log_sq_size);
1987
1988         mlx5e_build_common_cq_param(priv, param);
1989
1990         param->cq_period_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
1991 }
1992
1993 static void mlx5e_build_ico_cq_param(struct mlx5e_priv *priv,
1994                                      u8 log_wq_size,
1995                                      struct mlx5e_cq_param *param)
1996 {
1997         void *cqc = param->cqc;
1998
1999         MLX5_SET(cqc, cqc, log_cq_size, log_wq_size);
2000
2001         mlx5e_build_common_cq_param(priv, param);
2002
2003         param->cq_period_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
2004 }
2005
2006 static void mlx5e_build_icosq_param(struct mlx5e_priv *priv,
2007                                     u8 log_wq_size,
2008                                     struct mlx5e_sq_param *param)
2009 {
2010         void *sqc = param->sqc;
2011         void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
2012
2013         mlx5e_build_sq_param_common(priv, param);
2014
2015         MLX5_SET(wq, wq, log_wq_sz, log_wq_size);
2016         MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(priv->mdev, reg_umr_sq));
2017 }
2018
2019 static void mlx5e_build_xdpsq_param(struct mlx5e_priv *priv,
2020                                     struct mlx5e_params *params,
2021                                     struct mlx5e_sq_param *param)
2022 {
2023         void *sqc = param->sqc;
2024         void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
2025
2026         mlx5e_build_sq_param_common(priv, param);
2027         MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
2028 }
2029
2030 static void mlx5e_build_channel_param(struct mlx5e_priv *priv,
2031                                       struct mlx5e_params *params,
2032                                       struct mlx5e_channel_param *cparam)
2033 {
2034         u8 icosq_log_wq_sz = MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
2035
2036         mlx5e_build_rq_param(priv, params, &cparam->rq);
2037         mlx5e_build_sq_param(priv, params, &cparam->sq);
2038         mlx5e_build_xdpsq_param(priv, params, &cparam->xdp_sq);
2039         mlx5e_build_icosq_param(priv, icosq_log_wq_sz, &cparam->icosq);
2040         mlx5e_build_rx_cq_param(priv, params, &cparam->rx_cq);
2041         mlx5e_build_tx_cq_param(priv, params, &cparam->tx_cq);
2042         mlx5e_build_ico_cq_param(priv, icosq_log_wq_sz, &cparam->icosq_cq);
2043 }
2044
2045 int mlx5e_open_channels(struct mlx5e_priv *priv,
2046                         struct mlx5e_channels *chs)
2047 {
2048         struct mlx5e_channel_param *cparam;
2049         int err = -ENOMEM;
2050         int i;
2051
2052         chs->num = chs->params.num_channels;
2053
2054         chs->c = kcalloc(chs->num, sizeof(struct mlx5e_channel *), GFP_KERNEL);
2055         cparam = kzalloc(sizeof(struct mlx5e_channel_param), GFP_KERNEL);
2056         if (!chs->c || !cparam)
2057                 goto err_free;
2058
2059         mlx5e_build_channel_param(priv, &chs->params, cparam);
2060         for (i = 0; i < chs->num; i++) {
2061                 err = mlx5e_open_channel(priv, i, &chs->params, cparam, &chs->c[i]);
2062                 if (err)
2063                         goto err_close_channels;
2064         }
2065
2066         kfree(cparam);
2067         return 0;
2068
2069 err_close_channels:
2070         for (i--; i >= 0; i--)
2071                 mlx5e_close_channel(chs->c[i]);
2072
2073 err_free:
2074         kfree(chs->c);
2075         kfree(cparam);
2076         chs->num = 0;
2077         return err;
2078 }
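/* On a mid-loop failure the "i--; i >= 0" walk closes only the channels
 * that were actually opened before freeing the channel array. The
 * cparam scratch buffer is freed on both paths: the built parameters
 * are consumed during open and never needed afterwards.
 */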
2079
2080 static void mlx5e_activate_channels(struct mlx5e_channels *chs)
2081 {
2082         int i;
2083
2084         for (i = 0; i < chs->num; i++)
2085                 mlx5e_activate_channel(chs->c[i]);
2086 }
2087
2088 static int mlx5e_wait_channels_min_rx_wqes(struct mlx5e_channels *chs)
2089 {
2090         int err = 0;
2091         int i;
2092
2093         for (i = 0; i < chs->num; i++) {
2094                 err = mlx5e_wait_for_min_rx_wqes(&chs->c[i]->rq);
2095                 if (err)
2096                         break;
2097         }
2098
2099         return err;
2100 }
2101
2102 static void mlx5e_deactivate_channels(struct mlx5e_channels *chs)
2103 {
2104         int i;
2105
2106         for (i = 0; i < chs->num; i++)
2107                 mlx5e_deactivate_channel(chs->c[i]);
2108 }
2109
2110 void mlx5e_close_channels(struct mlx5e_channels *chs)
2111 {
2112         int i;
2113
2114         for (i = 0; i < chs->num; i++)
2115                 mlx5e_close_channel(chs->c[i]);
2116
2117         kfree(chs->c);
2118         chs->num = 0;
2119 }
2120
2121 static int
2122 mlx5e_create_rqt(struct mlx5e_priv *priv, int sz, struct mlx5e_rqt *rqt)
2123 {
2124         struct mlx5_core_dev *mdev = priv->mdev;
2125         void *rqtc;
2126         int inlen;
2127         int err;
2128         u32 *in;
2129         int i;
2130
2131         inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
2132         in = kvzalloc(inlen, GFP_KERNEL);
2133         if (!in)
2134                 return -ENOMEM;
2135
2136         rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
2137
2138         MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
2139         MLX5_SET(rqtc, rqtc, rqt_max_size, sz);
2140
2141         for (i = 0; i < sz; i++)
2142                 MLX5_SET(rqtc, rqtc, rq_num[i], priv->drop_rq.rqn);
2143
2144         err = mlx5_core_create_rqt(mdev, in, inlen, &rqt->rqtn);
2145         if (!err)
2146                 rqt->enabled = true;
2147
2148         kvfree(in);
2149         return err;
2150 }
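/* A freshly created RQT points every entry at the drop RQ; traffic is
 * steered to real channel RQs only later via mlx5e_redirect_rqt(). This
 * lets the table (and the TIRs that reference it) exist before any
 * channel is open without ever delivering packets to an uninitialized
 * RQ.
 */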
2151
2152 void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt)
2153 {
2154         rqt->enabled = false;
2155         mlx5_core_destroy_rqt(priv->mdev, rqt->rqtn);
2156 }
2157
2158 int mlx5e_create_indirect_rqt(struct mlx5e_priv *priv)
2159 {
2160         struct mlx5e_rqt *rqt = &priv->indir_rqt;
2161         int err;
2162
2163         err = mlx5e_create_rqt(priv, MLX5E_INDIR_RQT_SIZE, rqt);
2164         if (err)
2165                 mlx5_core_warn(priv->mdev, "create indirect rqts failed, %d\n", err);
2166         return err;
2167 }
2168
2169 int mlx5e_create_direct_rqts(struct mlx5e_priv *priv)
2170 {
2171         struct mlx5e_rqt *rqt;
2172         int err;
2173         int ix;
2174
2175         for (ix = 0; ix < priv->profile->max_nch(priv->mdev); ix++) {
2176                 rqt = &priv->direct_tir[ix].rqt;
2177                 err = mlx5e_create_rqt(priv, 1 /* size */, rqt);
2178                 if (err)
2179                         goto err_destroy_rqts;
2180         }
2181
2182         return 0;
2183
2184 err_destroy_rqts:
2185         mlx5_core_warn(priv->mdev, "create direct rqts failed, %d\n", err);
2186         for (ix--; ix >= 0; ix--)
2187                 mlx5e_destroy_rqt(priv, &priv->direct_tir[ix].rqt);
2188
2189         return err;
2190 }
2191
2192 void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv)
2193 {
2194         int i;
2195
2196         for (i = 0; i < priv->profile->max_nch(priv->mdev); i++)
2197                 mlx5e_destroy_rqt(priv, &priv->direct_tir[i].rqt);
2198 }
2199
2200 static int mlx5e_rx_hash_fn(int hfunc)
2201 {
2202         return (hfunc == ETH_RSS_HASH_TOP) ?
2203                MLX5_RX_HASH_FN_TOEPLITZ :
2204                MLX5_RX_HASH_FN_INVERTED_XOR8;
2205 }
2206
2207 static int mlx5e_bits_invert(unsigned long a, int size)
2208 {
2209         int inv = 0;
2210         int i;
2211
2212         for (i = 0; i < size; i++)
2213                 inv |= (test_bit(size - i - 1, &a) ? 1 : 0) << i;
2214
2215         return inv;
2216 }
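/* mlx5e_bits_invert() reverses the low "size" bits of a value, e.g. for
 * size = 3: 0b001 -> 0b100 and 0b011 -> 0b110. It is applied to the RSS
 * indirection index below when the XOR hash function is selected,
 * presumably to decorrelate adjacent table slots under the 8-bit
 * inverted-XOR hash.
 */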
2217
2218 static void mlx5e_fill_rqt_rqns(struct mlx5e_priv *priv, int sz,
2219                                 struct mlx5e_redirect_rqt_param rrp, void *rqtc)
2220 {
2221         int i;
2222
2223         for (i = 0; i < sz; i++) {
2224                 u32 rqn;
2225
2226                 if (rrp.is_rss) {
2227                         int ix = i;
2228
2229                         if (rrp.rss.hfunc == ETH_RSS_HASH_XOR)
2230                                 ix = mlx5e_bits_invert(i, ilog2(sz));
2231
2232                         ix = priv->channels.params.indirection_rqt[ix];
2233                         rqn = rrp.rss.channels->c[ix]->rq.rqn;
2234                 } else {
2235                         rqn = rrp.rqn;
2236                 }
2237                 MLX5_SET(rqtc, rqtc, rq_num[i], rqn);
2238         }
2239 }
2240
2241 int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz,
2242                        struct mlx5e_redirect_rqt_param rrp)
2243 {
2244         struct mlx5_core_dev *mdev = priv->mdev;
2245         void *rqtc;
2246         int inlen;
2247         u32 *in;
2248         int err;
2249
2250         inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + sizeof(u32) * sz;
2251         in = kvzalloc(inlen, GFP_KERNEL);
2252         if (!in)
2253                 return -ENOMEM;
2254
2255         rqtc = MLX5_ADDR_OF(modify_rqt_in, in, ctx);
2256
2257         MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
2258         MLX5_SET(modify_rqt_in, in, bitmask.rqn_list, 1);
2259         mlx5e_fill_rqt_rqns(priv, sz, rrp, rqtc);
2260         err = mlx5_core_modify_rqt(mdev, rqtn, in, inlen);
2261
2262         kvfree(in);
2263         return err;
2264 }
2265
2266 static u32 mlx5e_get_direct_rqn(struct mlx5e_priv *priv, int ix,
2267                                 struct mlx5e_redirect_rqt_param rrp)
2268 {
2269         if (!rrp.is_rss)
2270                 return rrp.rqn;
2271
2272         if (ix >= rrp.rss.channels->num)
2273                 return priv->drop_rq.rqn;
2274
2275         return rrp.rss.channels->c[ix]->rq.rqn;
2276 }
2277
2278 static void mlx5e_redirect_rqts(struct mlx5e_priv *priv,
2279                                 struct mlx5e_redirect_rqt_param rrp)
2280 {
2281         u32 rqtn;
2282         int ix;
2283
2284         if (priv->indir_rqt.enabled) {
2285                 /* RSS RQ table */
2286                 rqtn = priv->indir_rqt.rqtn;
2287                 mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, rrp);
2288         }
2289
2290         for (ix = 0; ix < priv->profile->max_nch(priv->mdev); ix++) {
2291                 struct mlx5e_redirect_rqt_param direct_rrp = {
2292                         .is_rss = false,
2293                         {
2294                                 .rqn    = mlx5e_get_direct_rqn(priv, ix, rrp)
2295                         },
2296                 };
2297
2298                 /* Direct RQ tables */
2299                 if (!priv->direct_tir[ix].rqt.enabled)
2300                         continue;
2301
2302                 rqtn = priv->direct_tir[ix].rqt.rqtn;
2303                 mlx5e_redirect_rqt(priv, rqtn, 1, direct_rrp);
2304         }
2305 }
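/* Two kinds of RQ tables are refreshed here: the RSS-sized indirection
 * table, which fans traffic out by hash, and one single-entry "direct"
 * table per possible channel for flows pinned to a specific ring.
 * Channels beyond the currently open count resolve to the drop RQ, as
 * mlx5e_get_direct_rqn() above shows.
 */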
2306
2307 static void mlx5e_redirect_rqts_to_channels(struct mlx5e_priv *priv,
2308                                             struct mlx5e_channels *chs)
2309 {
2310         struct mlx5e_redirect_rqt_param rrp = {
2311                 .is_rss        = true,
2312                 {
2313                         .rss = {
2314                                 .channels  = chs,
2315                                 .hfunc     = chs->params.rss_hfunc,
2316                         }
2317                 },
2318         };
2319
2320         mlx5e_redirect_rqts(priv, rrp);
2321 }
2322
2323 static void mlx5e_redirect_rqts_to_drop(struct mlx5e_priv *priv)
2324 {
2325         struct mlx5e_redirect_rqt_param drop_rrp = {
2326                 .is_rss = false,
2327                 {
2328                         .rqn = priv->drop_rq.rqn,
2329                 },
2330         };
2331
2332         mlx5e_redirect_rqts(priv, drop_rrp);
2333 }
2334
2335 static void mlx5e_build_tir_ctx_lro(struct mlx5e_params *params, void *tirc)
2336 {
2337         if (!params->lro_en)
2338                 return;
2339
2340 #define ROUGH_MAX_L2_L3_HDR_SZ 256
2341
2342         MLX5_SET(tirc, tirc, lro_enable_mask,
2343                  MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
2344                  MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
2345         MLX5_SET(tirc, tirc, lro_max_ip_payload_size,
2346                  (params->lro_wqe_sz - ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
2347         MLX5_SET(tirc, tirc, lro_timeout_period_usecs, params->lro_timeout);
2348 }
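/* lro_max_ip_payload_size is programmed in 256-byte units (hence the
 * ">> 8") after reserving ROUGH_MAX_L2_L3_HDR_SZ for headers. Assuming
 * the default 64KB LRO WQE size, that works out to
 * (65536 - 256) >> 8 = 255 units, i.e. an aggregated payload cap just
 * under 64KB.
 */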
2349
2350 void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_params *params,
2351                                     enum mlx5e_traffic_types tt,
2352                                     void *tirc, bool inner)
2353 {
2354         void *hfso = inner ? MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_inner) :
2355                              MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
2356
2357 #define MLX5_HASH_IP            (MLX5_HASH_FIELD_SEL_SRC_IP   |\
2358                                  MLX5_HASH_FIELD_SEL_DST_IP)
2359
2360 #define MLX5_HASH_IP_L4PORTS    (MLX5_HASH_FIELD_SEL_SRC_IP   |\
2361                                  MLX5_HASH_FIELD_SEL_DST_IP   |\
2362                                  MLX5_HASH_FIELD_SEL_L4_SPORT |\
2363                                  MLX5_HASH_FIELD_SEL_L4_DPORT)
2364
2365 #define MLX5_HASH_IP_IPSEC_SPI  (MLX5_HASH_FIELD_SEL_SRC_IP   |\
2366                                  MLX5_HASH_FIELD_SEL_DST_IP   |\
2367                                  MLX5_HASH_FIELD_SEL_IPSEC_SPI)
2368
2369         MLX5_SET(tirc, tirc, rx_hash_fn, mlx5e_rx_hash_fn(params->rss_hfunc));
2370         if (params->rss_hfunc == ETH_RSS_HASH_TOP) {
2371                 void *rss_key = MLX5_ADDR_OF(tirc, tirc,
2372                                              rx_hash_toeplitz_key);
2373                 size_t len = MLX5_FLD_SZ_BYTES(tirc,
2374                                                rx_hash_toeplitz_key);
2375
2376                 MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
2377                 memcpy(rss_key, params->toeplitz_hash_key, len);
2378         }
2379
2380         switch (tt) {
2381         case MLX5E_TT_IPV4_TCP:
2382                 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2383                          MLX5_L3_PROT_TYPE_IPV4);
2384                 MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
2385                          MLX5_L4_PROT_TYPE_TCP);
2386                 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2387                          MLX5_HASH_IP_L4PORTS);
2388                 break;
2389
2390         case MLX5E_TT_IPV6_TCP:
2391                 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2392                          MLX5_L3_PROT_TYPE_IPV6);
2393                 MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
2394                          MLX5_L4_PROT_TYPE_TCP);
2395                 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2396                          MLX5_HASH_IP_L4PORTS);
2397                 break;
2398
2399         case MLX5E_TT_IPV4_UDP:
2400                 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2401                          MLX5_L3_PROT_TYPE_IPV4);
2402                 MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
2403                          MLX5_L4_PROT_TYPE_UDP);
2404                 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2405                          MLX5_HASH_IP_L4PORTS);
2406                 break;
2407
2408         case MLX5E_TT_IPV6_UDP:
2409                 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2410                          MLX5_L3_PROT_TYPE_IPV6);
2411                 MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
2412                          MLX5_L4_PROT_TYPE_UDP);
2413                 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2414                          MLX5_HASH_IP_L4PORTS);
2415                 break;
2416
2417         case MLX5E_TT_IPV4_IPSEC_AH:
2418                 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2419                          MLX5_L3_PROT_TYPE_IPV4);
2420                 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2421                          MLX5_HASH_IP_IPSEC_SPI);
2422                 break;
2423
2424         case MLX5E_TT_IPV6_IPSEC_AH:
2425                 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2426                          MLX5_L3_PROT_TYPE_IPV6);
2427                 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2428                          MLX5_HASH_IP_IPSEC_SPI);
2429                 break;
2430
2431         case MLX5E_TT_IPV4_IPSEC_ESP:
2432                 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2433                          MLX5_L3_PROT_TYPE_IPV4);
2434                 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2435                          MLX5_HASH_IP_IPSEC_SPI);
2436                 break;
2437
2438         case MLX5E_TT_IPV6_IPSEC_ESP:
2439                 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2440                          MLX5_L3_PROT_TYPE_IPV6);
2441                 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2442                          MLX5_HASH_IP_IPSEC_SPI);
2443                 break;
2444
2445         case MLX5E_TT_IPV4:
2446                 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2447                          MLX5_L3_PROT_TYPE_IPV4);
2448                 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2449                          MLX5_HASH_IP);
2450                 break;
2451
2452         case MLX5E_TT_IPV6:
2453                 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2454                          MLX5_L3_PROT_TYPE_IPV6);
2455                 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2456                          MLX5_HASH_IP);
2457                 break;
2458         default:
2459                 WARN_ONCE(true, "%s: bad traffic type!\n", __func__);
2460         }
2461 }
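/* Summary of the selection above: the TCP and UDP traffic types hash
 * over the full 4-tuple (source/destination IP plus L4 ports), the
 * IPsec AH/ESP types hash over the address pair plus SPI, and the plain
 * IPv4/IPv6 types fall back to the address pair alone, so fragments and
 * unknown L4 protocols still get a stable, if coarser, distribution.
 */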
2462
2463 static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv)
2464 {
2465         struct mlx5_core_dev *mdev = priv->mdev;
2466
2467         void *in;
2468         void *tirc;
2469         int inlen;
2470         int err;
2471         int tt;
2472         int ix;
2473
2474         inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
2475         in = kvzalloc(inlen, GFP_KERNEL);
2476         if (!in)
2477                 return -ENOMEM;
2478
2479         MLX5_SET(modify_tir_in, in, bitmask.lro, 1);
2480         tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx);
2481
2482         mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc);
2483
2484         for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
2485                 err = mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in,
2486                                            inlen);
2487                 if (err)
2488                         goto free_in;
2489         }
2490
2491         for (ix = 0; ix < priv->profile->max_nch(priv->mdev); ix++) {
2492                 err = mlx5_core_modify_tir(mdev, priv->direct_tir[ix].tirn,
2493                                            in, inlen);
2494                 if (err)
2495                         goto free_in;
2496         }
2497
2498 free_in:
2499         kvfree(in);
2500
2501         return err;
2502 }
2503
2504 static void mlx5e_build_inner_indir_tir_ctx(struct mlx5e_priv *priv,
2505                                             enum mlx5e_traffic_types tt,
2506                                             u32 *tirc)
2507 {
2508         MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);
2509
2510         mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc);
2511
2512         MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
2513         MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqt.rqtn);
2514         MLX5_SET(tirc, tirc, tunneled_offload_en, 0x1);
2515
2516         mlx5e_build_indir_tir_ctx_hash(&priv->channels.params, tt, tirc, true);
2517 }
2518
2519 static int mlx5e_set_mtu(struct mlx5e_priv *priv, u16 mtu)
2520 {
2521         struct mlx5_core_dev *mdev = priv->mdev;
2522         u16 hw_mtu = MLX5E_SW2HW_MTU(priv, mtu);
2523         int err;
2524
2525         err = mlx5_set_port_mtu(mdev, hw_mtu, 1);
2526         if (err)
2527                 return err;
2528
2529         /* Update vport context MTU */
2530         mlx5_modify_nic_vport_mtu(mdev, hw_mtu);
2531         return 0;
2532 }
2533
2534 static void mlx5e_query_mtu(struct mlx5e_priv *priv, u16 *mtu)
2535 {
2536         struct mlx5_core_dev *mdev = priv->mdev;
2537         u16 hw_mtu = 0;
2538         int err;
2539
2540         err = mlx5_query_nic_vport_mtu(mdev, &hw_mtu);
2541         if (err || !hw_mtu) /* fallback to port oper mtu */
2542                 mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1);
2543
2544         *mtu = MLX5E_HW2SW_MTU(priv, hw_mtu);
2545 }
2546
2547 static int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv)
2548 {
2549         struct net_device *netdev = priv->netdev;
2550         u16 mtu;
2551         int err;
2552
2553         err = mlx5e_set_mtu(priv, netdev->mtu);
2554         if (err)
2555                 return err;
2556
2557         mlx5e_query_mtu(priv, &mtu);
2558         if (mtu != netdev->mtu)
2559                 netdev_warn(netdev, "%s: VPort MTU %d is different from netdev MTU %d\n",
2560                             __func__, mtu, netdev->mtu);
2561
2562         netdev->mtu = mtu;
2563         return 0;
2564 }
2565
2566 static void mlx5e_netdev_set_tcs(struct net_device *netdev)
2567 {
2568         struct mlx5e_priv *priv = netdev_priv(netdev);
2569         int nch = priv->channels.params.num_channels;
2570         int ntc = priv->channels.params.num_tc;
2571         int tc;
2572
2573         netdev_reset_tc(netdev);
2574
2575         if (ntc == 1)
2576                 return;
2577
2578         netdev_set_num_tc(netdev, ntc);
2579
2580         /* Map netdev TCs to offset 0.
2581          * We have our own UP-to-TXQ mapping for QoS.
2582          */
2583         for (tc = 0; tc < ntc; tc++)
2584                 netdev_set_tc_queue(netdev, tc, nch, 0);
2585 }
2586
2587 static void mlx5e_build_channels_tx_maps(struct mlx5e_priv *priv)
2588 {
2589         struct mlx5e_channel *c;
2590         struct mlx5e_txqsq *sq;
2591         int i, tc;
2592
2593         for (i = 0; i < priv->channels.num; i++)
2594                 for (tc = 0; tc < priv->profile->max_tc; tc++)
2595                         priv->channel_tc2txq[i][tc] = i + tc * priv->channels.num;
2596
2597         for (i = 0; i < priv->channels.num; i++) {
2598                 c = priv->channels.c[i];
2599                 for (tc = 0; tc < c->num_tc; tc++) {
2600                         sq = &c->sq[tc];
2601                         priv->txq2sq[sq->txq_ix] = sq;
2602                 }
2603         }
2604 }
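/* TXQ layout: queue index = channel + tc * num_channels, i.e. every
 * channel's tc0 queue comes first, then all tc1 queues, and so on. With
 * 4 channels and 2 TCs, channel 1 / tc 1 maps to txq 1 + 1 * 4 = 5.
 * txq2sq[] is the reverse map the transmit path uses to find its SQ.
 */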
2605
2606 void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
2607 {
2608         int num_txqs = priv->channels.num * priv->channels.params.num_tc;
2609         struct net_device *netdev = priv->netdev;
2610
2611         mlx5e_netdev_set_tcs(netdev);
2612         netif_set_real_num_tx_queues(netdev, num_txqs);
2613         netif_set_real_num_rx_queues(netdev, priv->channels.num);
2614
2615         mlx5e_build_channels_tx_maps(priv);
2616         mlx5e_activate_channels(&priv->channels);
2617         netif_tx_start_all_queues(priv->netdev);
2618
2619         if (MLX5_VPORT_MANAGER(priv->mdev))
2620                 mlx5e_add_sqs_fwd_rules(priv);
2621
2622         mlx5e_wait_channels_min_rx_wqes(&priv->channels);
2623         mlx5e_redirect_rqts_to_channels(priv, &priv->channels);
2624 }
2625
2626 void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv)
2627 {
2628         mlx5e_redirect_rqts_to_drop(priv);
2629
2630         if (MLX5_VPORT_MANAGER(priv->mdev))
2631                 mlx5e_remove_sqs_fwd_rules(priv);
2632
2633         /* FIXME: This is a W/A only for TX timeout watchdog false alarms when
2634          * polling for inactive TX queues.
2635          */
2636         netif_tx_stop_all_queues(priv->netdev);
2637         netif_tx_disable(priv->netdev);
2638         mlx5e_deactivate_channels(&priv->channels);
2639 }
2640
2641 void mlx5e_switch_priv_channels(struct mlx5e_priv *priv,
2642                                 struct mlx5e_channels *new_chs,
2643                                 mlx5e_fp_hw_modify hw_modify)
2644 {
2645         struct net_device *netdev = priv->netdev;
2646         int new_num_txqs = new_chs->num * new_chs->params.num_tc;
2647         int carrier_ok;
2648
2649
2650         carrier_ok = netif_carrier_ok(netdev);
2651         netif_carrier_off(netdev);
2652
2653         if (new_num_txqs < netdev->real_num_tx_queues)
2654                 netif_set_real_num_tx_queues(netdev, new_num_txqs);
2655
2656         mlx5e_deactivate_priv_channels(priv);
2657         mlx5e_close_channels(&priv->channels);
2658
2659         priv->channels = *new_chs;
2660
2661         /* New channels are ready to roll, modify HW settings if needed */
2662         if (hw_modify)
2663                 hw_modify(priv);
2664
2665         mlx5e_refresh_tirs(priv, false);
2666         mlx5e_activate_priv_channels(priv);
2667
2668         /* restore the carrier state if needed */
2669         if (carrier_ok)
2670                 netif_carrier_on(netdev);
2671 }
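/* Channel switching is make-before-break: callers open the replacement
 * channel set first (see mlx5e_setup_tc_mqprio() below) and only then
 * have this function deactivate and close the old set, swap the
 * pointers, and reactivate. The carrier is forced off for the duration
 * so the stack never transmits into a half-switched configuration.
 */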
2672
2673 void mlx5e_timestamp_set(struct mlx5e_priv *priv)
2674 {
2675         priv->tstamp.tx_type   = HWTSTAMP_TX_OFF;
2676         priv->tstamp.rx_filter = HWTSTAMP_FILTER_NONE;
2677 }
2678
2679 int mlx5e_open_locked(struct net_device *netdev)
2680 {
2681         struct mlx5e_priv *priv = netdev_priv(netdev);
2682         int err;
2683
2684         set_bit(MLX5E_STATE_OPENED, &priv->state);
2685
2686         err = mlx5e_open_channels(priv, &priv->channels);
2687         if (err)
2688                 goto err_clear_state_opened_flag;
2689
2690         mlx5e_refresh_tirs(priv, false);
2691         mlx5e_activate_priv_channels(priv);
2692         if (priv->profile->update_carrier)
2693                 priv->profile->update_carrier(priv);
2694         mlx5e_timestamp_set(priv);
2695
2696         if (priv->profile->update_stats)
2697                 queue_delayed_work(priv->wq, &priv->update_stats_work, 0);
2698
2699         return 0;
2700
2701 err_clear_state_opened_flag:
2702         clear_bit(MLX5E_STATE_OPENED, &priv->state);
2703         return err;
2704 }
2705
2706 int mlx5e_open(struct net_device *netdev)
2707 {
2708         struct mlx5e_priv *priv = netdev_priv(netdev);
2709         int err;
2710
2711         mutex_lock(&priv->state_lock);
2712         err = mlx5e_open_locked(netdev);
2713         if (!err)
2714                 mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_UP);
2715         mutex_unlock(&priv->state_lock);
2716
2717         return err;
2718 }
2719
2720 int mlx5e_close_locked(struct net_device *netdev)
2721 {
2722         struct mlx5e_priv *priv = netdev_priv(netdev);
2723
2724         /* May already be CLOSED if a previous configuration operation
2725          * (e.g. an RX/TX queue size change) that involves close & open failed.
2726          */
2727         if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
2728                 return 0;
2729
2730         clear_bit(MLX5E_STATE_OPENED, &priv->state);
2731
2732         netif_carrier_off(priv->netdev);
2733         mlx5e_deactivate_priv_channels(priv);
2734         mlx5e_close_channels(&priv->channels);
2735
2736         return 0;
2737 }
2738
2739 int mlx5e_close(struct net_device *netdev)
2740 {
2741         struct mlx5e_priv *priv = netdev_priv(netdev);
2742         int err;
2743
2744         if (!netif_device_present(netdev))
2745                 return -ENODEV;
2746
2747         mutex_lock(&priv->state_lock);
2748         mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_DOWN);
2749         err = mlx5e_close_locked(netdev);
2750         mutex_unlock(&priv->state_lock);
2751
2752         return err;
2753 }
2754
2755 static int mlx5e_alloc_drop_rq(struct mlx5_core_dev *mdev,
2756                                struct mlx5e_rq *rq,
2757                                struct mlx5e_rq_param *param)
2758 {
2759         void *rqc = param->rqc;
2760         void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
2761         int err;
2762
2763         param->wq.db_numa_node = param->wq.buf_numa_node;
2764
2765         err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
2766                                 &rq->wq_ctrl);
2767         if (err)
2768                 return err;
2769
2770         rq->mdev = mdev;
2771
2772         return 0;
2773 }
2774
2775 static int mlx5e_alloc_drop_cq(struct mlx5_core_dev *mdev,
2776                                struct mlx5e_cq *cq,
2777                                struct mlx5e_cq_param *param)
2778 {
2779         return mlx5e_alloc_cq_common(mdev, param, cq);
2780 }
2781
2782 static int mlx5e_open_drop_rq(struct mlx5_core_dev *mdev,
2783                               struct mlx5e_rq *drop_rq)
2784 {
2785         struct mlx5e_cq_param cq_param = {};
2786         struct mlx5e_rq_param rq_param = {};
2787         struct mlx5e_cq *cq = &drop_rq->cq;
2788         int err;
2789
2790         mlx5e_build_drop_rq_param(&rq_param);
2791
2792         err = mlx5e_alloc_drop_cq(mdev, cq, &cq_param);
2793         if (err)
2794                 return err;
2795
2796         err = mlx5e_create_cq(cq, &cq_param);
2797         if (err)
2798                 goto err_free_cq;
2799
2800         err = mlx5e_alloc_drop_rq(mdev, drop_rq, &rq_param);
2801         if (err)
2802                 goto err_destroy_cq;
2803
2804         err = mlx5e_create_rq(drop_rq, &rq_param);
2805         if (err)
2806                 goto err_free_rq;
2807
2808         return 0;
2809
2810 err_free_rq:
2811         mlx5e_free_rq(drop_rq);
2812
2813 err_destroy_cq:
2814         mlx5e_destroy_cq(cq);
2815
2816 err_free_cq:
2817         mlx5e_free_cq(cq);
2818
2819         return err;
2820 }
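/* The drop RQ is a real firmware object, but it is never given receive
 * buffers or completion processing, so anything steered to it is
 * discarded by the device. It gives the steering tables a valid
 * destination whenever traffic must be dropped safely, e.g. while the
 * interface is down or the channel count shrinks.
 */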
2821
2822 static void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq)
2823 {
2824         mlx5e_destroy_rq(drop_rq);
2825         mlx5e_free_rq(drop_rq);
2826         mlx5e_destroy_cq(&drop_rq->cq);
2827         mlx5e_free_cq(&drop_rq->cq);
2828 }
2829
2830 int mlx5e_create_tis(struct mlx5_core_dev *mdev, int tc,
2831                      u32 underlay_qpn, u32 *tisn)
2832 {
2833         u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {0};
2834         void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
2835
2836         MLX5_SET(tisc, tisc, prio, tc << 1);
2837         MLX5_SET(tisc, tisc, underlay_qpn, underlay_qpn);
2838         MLX5_SET(tisc, tisc, transport_domain, mdev->mlx5e_res.td.tdn);
2839
2840         if (mlx5_lag_is_lacp_owner(mdev))
2841                 MLX5_SET(tisc, tisc, strict_lag_tx_port_affinity, 1);
2842
2843         return mlx5_core_create_tis(mdev, in, sizeof(in), tisn);
2844 }
2845
2846 void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn)
2847 {
2848         mlx5_core_destroy_tis(mdev, tisn);
2849 }
2850
2851 int mlx5e_create_tises(struct mlx5e_priv *priv)
2852 {
2853         int err;
2854         int tc;
2855
2856         for (tc = 0; tc < priv->profile->max_tc; tc++) {
2857                 err = mlx5e_create_tis(priv->mdev, tc, 0, &priv->tisn[tc]);
2858                 if (err)
2859                         goto err_close_tises;
2860         }
2861
2862         return 0;
2863
2864 err_close_tises:
2865         for (tc--; tc >= 0; tc--)
2866                 mlx5e_destroy_tis(priv->mdev, priv->tisn[tc]);
2867
2868         return err;
2869 }
2870
2871 void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv)
2872 {
2873         int tc;
2874
2875         for (tc = 0; tc < priv->profile->max_tc; tc++)
2876                 mlx5e_destroy_tis(priv->mdev, priv->tisn[tc]);
2877 }
2878
2879 static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv,
2880                                       enum mlx5e_traffic_types tt,
2881                                       u32 *tirc)
2882 {
2883         MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);
2884
2885         mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc);
2886
2887         MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
2888         MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqt.rqtn);
2889         mlx5e_build_indir_tir_ctx_hash(&priv->channels.params, tt, tirc, false);
2890 }
2891
2892 static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 rqtn, u32 *tirc)
2893 {
2894         MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);
2895
2896         mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc);
2897
2898         MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
2899         MLX5_SET(tirc, tirc, indirect_table, rqtn);
2900         MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8);
2901 }
2902
2903 int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv)
2904 {
2905         struct mlx5e_tir *tir;
2906         void *tirc;
2907         int inlen;
2908         int i = 0;
2909         int err;
2910         u32 *in;
2911         int tt;
2912
2913         inlen = MLX5_ST_SZ_BYTES(create_tir_in);
2914         in = kvzalloc(inlen, GFP_KERNEL);
2915         if (!in)
2916                 return -ENOMEM;
2917
2918         for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
2919                 memset(in, 0, inlen);
2920                 tir = &priv->indir_tir[tt];
2921                 tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
2922                 mlx5e_build_indir_tir_ctx(priv, tt, tirc);
2923                 err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
2924                 if (err) {
2925                         mlx5_core_warn(priv->mdev, "create indirect tirs failed, %d\n", err);
2926                         goto err_destroy_inner_tirs;
2927                 }
2928         }
2929
2930         if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
2931                 goto out;
2932
2933         for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++) {
2934                 memset(in, 0, inlen);
2935                 tir = &priv->inner_indir_tir[i];
2936                 tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
2937                 mlx5e_build_inner_indir_tir_ctx(priv, i, tirc);
2938                 err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
2939                 if (err) {
2940                         mlx5_core_warn(priv->mdev, "create inner indirect tirs failed, %d\n", err);
2941                         goto err_destroy_inner_tirs;
2942                 }
2943         }
2944
2945 out:
2946         kvfree(in);
2947
2948         return 0;
2949
2950 err_destroy_inner_tirs:
2951         for (i--; i >= 0; i--)
2952                 mlx5e_destroy_tir(priv->mdev, &priv->inner_indir_tir[i]);
2953
2954         for (tt--; tt >= 0; tt--)
2955                 mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[tt]);
2956
2957         kvfree(in);
2958
2959         return err;
2960 }
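/* The single error label serves both loops: "i" only advances once all
 * outer TIRs exist, so on an early failure the "i--" walk is a no-op
 * and only the partial outer set is destroyed, while a failure in the
 * inner loop first unwinds the partial inner set and then every outer
 * TIR. Devices without inner-FT tunnel support simply stop after the
 * first loop.
 */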
2961
2962 int mlx5e_create_direct_tirs(struct mlx5e_priv *priv)
2963 {
2964         int nch = priv->profile->max_nch(priv->mdev);
2965         struct mlx5e_tir *tir;
2966         void *tirc;
2967         int inlen;
2968         int err;
2969         u32 *in;
2970         int ix;
2971
2972         inlen = MLX5_ST_SZ_BYTES(create_tir_in);
2973         in = kvzalloc(inlen, GFP_KERNEL);
2974         if (!in)
2975                 return -ENOMEM;
2976
2977         for (ix = 0; ix < nch; ix++) {
2978                 memset(in, 0, inlen);
2979                 tir = &priv->direct_tir[ix];
2980                 tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
2981                 mlx5e_build_direct_tir_ctx(priv, priv->direct_tir[ix].rqt.rqtn, tirc);
2982                 err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
2983                 if (err)
2984                         goto err_destroy_ch_tirs;
2985         }
2986
2987         kvfree(in);
2988
2989         return 0;
2990
2991 err_destroy_ch_tirs:
2992         mlx5_core_warn(priv->mdev, "create direct tirs failed, %d\n", err);
2993         for (ix--; ix >= 0; ix--)
2994                 mlx5e_destroy_tir(priv->mdev, &priv->direct_tir[ix]);
2995
2996         kvfree(in);
2997
2998         return err;
2999 }
3000
3001 void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv)
3002 {
3003         int i;
3004
3005         for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
3006                 mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[i]);
3007
3008         if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
3009                 return;
3010
3011         for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
3012                 mlx5e_destroy_tir(priv->mdev, &priv->inner_indir_tir[i]);
3013 }
3014
3015 void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv)
3016 {
3017         int nch = priv->profile->max_nch(priv->mdev);
3018         int i;
3019
3020         for (i = 0; i < nch; i++)
3021                 mlx5e_destroy_tir(priv->mdev, &priv->direct_tir[i]);
3022 }
3023
3024 static int mlx5e_modify_channels_scatter_fcs(struct mlx5e_channels *chs, bool enable)
3025 {
3026         int err = 0;
3027         int i;
3028
3029         for (i = 0; i < chs->num; i++) {
3030                 err = mlx5e_modify_rq_scatter_fcs(&chs->c[i]->rq, enable);
3031                 if (err)
3032                         return err;
3033         }
3034
3035         return 0;
3036 }
3037
3038 static int mlx5e_modify_channels_vsd(struct mlx5e_channels *chs, bool vsd)
3039 {
3040         int err = 0;
3041         int i;
3042
3043         for (i = 0; i < chs->num; i++) {
3044                 err = mlx5e_modify_rq_vsd(&chs->c[i]->rq, vsd);
3045                 if (err)
3046                         return err;
3047         }
3048
3049         return 0;
3050 }
3051
3052 static int mlx5e_setup_tc_mqprio(struct net_device *netdev,
3053                                  struct tc_mqprio_qopt *mqprio)
3054 {
3055         struct mlx5e_priv *priv = netdev_priv(netdev);
3056         struct mlx5e_channels new_channels = {};
3057         u8 tc = mqprio->num_tc;
3058         int err = 0;
3059
3060         mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
3061
3062         if (tc && tc != MLX5E_MAX_NUM_TC)
3063                 return -EINVAL;
3064
3065         mutex_lock(&priv->state_lock);
3066
3067         new_channels.params = priv->channels.params;
3068         new_channels.params.num_tc = tc ? tc : 1;
3069
3070         if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
3071                 priv->channels.params = new_channels.params;
3072                 goto out;
3073         }
3074
3075         err = mlx5e_open_channels(priv, &new_channels);
3076         if (err)
3077                 goto out;
3078
3079         mlx5e_switch_priv_channels(priv, &new_channels, NULL);
3080 out:
3081         mutex_unlock(&priv->state_lock);
3082         return err;
3083 }
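/* The mqprio offload here is all-or-nothing on TC count: the device
 * accepts either a single TC or the full MLX5E_MAX_NUM_TC, rejecting
 * anything in between. When the interface is closed the new TC count is
 * only recorded in the parameters and takes effect on the next open;
 * otherwise the channels are rebuilt and switched in place.
 */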
3084
3085 #ifdef CONFIG_MLX5_ESWITCH
3086 static int mlx5e_setup_tc_cls_flower(struct mlx5e_priv *priv,
3087                                      struct tc_cls_flower_offload *cls_flower)
3088 {
3089         if (cls_flower->common.chain_index)
3090                 return -EOPNOTSUPP;
3091
3092         switch (cls_flower->command) {
3093         case TC_CLSFLOWER_REPLACE:
3094                 return mlx5e_configure_flower(priv, cls_flower);
3095         case TC_CLSFLOWER_DESTROY:
3096                 return mlx5e_delete_flower(priv, cls_flower);
3097         case TC_CLSFLOWER_STATS:
3098                 return mlx5e_stats_flower(priv, cls_flower);
3099         default:
3100                 return -EOPNOTSUPP;
3101         }
3102 }
3103
3104 int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
3105                             void *cb_priv)
3106 {
3107         struct mlx5e_priv *priv = cb_priv;
3108
3109         switch (type) {
3110         case TC_SETUP_CLSFLOWER:
3111                 return mlx5e_setup_tc_cls_flower(priv, type_data);
3112         default:
3113                 return -EOPNOTSUPP;
3114         }
3115 }
3116
3117 static int mlx5e_setup_tc_block(struct net_device *dev,
3118                                 struct tc_block_offload *f)
3119 {
3120         struct mlx5e_priv *priv = netdev_priv(dev);
3121
3122         if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
3123                 return -EOPNOTSUPP;
3124
3125         switch (f->command) {
3126         case TC_BLOCK_BIND:
3127                 return tcf_block_cb_register(f->block, mlx5e_setup_tc_block_cb,
3128                                              priv, priv);
3129         case TC_BLOCK_UNBIND:
3130                 tcf_block_cb_unregister(f->block, mlx5e_setup_tc_block_cb,
3131                                         priv);
3132                 return 0;
3133         default:
3134                 return -EOPNOTSUPP;
3135         }
3136 }
3137 #endif
3138
3139 int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type,
3140                    void *type_data)
3141 {
3142         switch (type) {
3143 #ifdef CONFIG_MLX5_ESWITCH
3144         case TC_SETUP_BLOCK:
3145                 return mlx5e_setup_tc_block(dev, type_data);
3146 #endif
3147         case TC_SETUP_MQPRIO:
3148                 return mlx5e_setup_tc_mqprio(dev, type_data);
3149         default:
3150                 return -EOPNOTSUPP;
3151         }
3152 }
3153
3154 static void
3155 mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
3156 {
3157         struct mlx5e_priv *priv = netdev_priv(dev);
3158         struct mlx5e_sw_stats *sstats = &priv->stats.sw;
3159         struct mlx5e_vport_stats *vstats = &priv->stats.vport;
3160         struct mlx5e_pport_stats *pstats = &priv->stats.pport;
3161
3162         if (mlx5e_is_uplink_rep(priv)) {
3163                 stats->rx_packets = PPORT_802_3_GET(pstats, a_frames_received_ok);
3164                 stats->rx_bytes   = PPORT_802_3_GET(pstats, a_octets_received_ok);
3165                 stats->tx_packets = PPORT_802_3_GET(pstats, a_frames_transmitted_ok);
3166                 stats->tx_bytes   = PPORT_802_3_GET(pstats, a_octets_transmitted_ok);
3167         } else {
3168                 stats->rx_packets = sstats->rx_packets;
3169                 stats->rx_bytes   = sstats->rx_bytes;
3170                 stats->tx_packets = sstats->tx_packets;
3171                 stats->tx_bytes   = sstats->tx_bytes;
3172                 stats->tx_dropped = sstats->tx_queue_dropped;
3173         }
3174
3175         stats->rx_dropped = priv->stats.qcnt.rx_out_of_buffer;
3176
3177         stats->rx_length_errors =
3178                 PPORT_802_3_GET(pstats, a_in_range_length_errors) +
3179                 PPORT_802_3_GET(pstats, a_out_of_range_length_field) +
3180                 PPORT_802_3_GET(pstats, a_frame_too_long_errors);
3181         stats->rx_crc_errors =
3182                 PPORT_802_3_GET(pstats, a_frame_check_sequence_errors);
3183         stats->rx_frame_errors = PPORT_802_3_GET(pstats, a_alignment_errors);
3184         stats->tx_aborted_errors = PPORT_2863_GET(pstats, if_out_discards);
3185         stats->rx_errors = stats->rx_length_errors + stats->rx_crc_errors +
3186                            stats->rx_frame_errors;
3187         stats->tx_errors = stats->tx_aborted_errors + stats->tx_carrier_errors;
3188
3189         /* The vport multicast counter also counts packets that are
3190          * dropped due to steering or an RX out-of-buffer condition.
3191          */
3192         stats->multicast =
3193                 VPORT_COUNTER_GET(vstats, received_eth_multicast.packets);
3194 }
3195
3196 static void mlx5e_set_rx_mode(struct net_device *dev)
3197 {
3198         struct mlx5e_priv *priv = netdev_priv(dev);
3199
3200         queue_work(priv->wq, &priv->set_rx_mode_work);
3201 }
3202
3203 static int mlx5e_set_mac(struct net_device *netdev, void *addr)
3204 {
3205         struct mlx5e_priv *priv = netdev_priv(netdev);
3206         struct sockaddr *saddr = addr;
3207
3208         if (!is_valid_ether_addr(saddr->sa_data))
3209                 return -EADDRNOTAVAIL;
3210
3211         netif_addr_lock_bh(netdev);
3212         ether_addr_copy(netdev->dev_addr, saddr->sa_data);
3213         netif_addr_unlock_bh(netdev);
3214
3215         queue_work(priv->wq, &priv->set_rx_mode_work);
3216
3217         return 0;
3218 }
3219
3220 #define MLX5E_SET_FEATURE(netdev, feature, enable)      \
3221         do {                                            \
3222                 if (enable)                             \
3223                         netdev->features |= feature;    \
3224                 else                                    \
3225                         netdev->features &= ~feature;   \
3226         } while (0)
3227
3228 typedef int (*mlx5e_feature_handler)(struct net_device *netdev, bool enable);
3229
3230 static int set_feature_lro(struct net_device *netdev, bool enable)
3231 {
3232         struct mlx5e_priv *priv = netdev_priv(netdev);
3233         struct mlx5e_channels new_channels = {};
3234         int err = 0;
3235         bool reset;
3236
3237         mutex_lock(&priv->state_lock);
3238
3239         reset = (priv->channels.params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST);
3240         reset = reset && test_bit(MLX5E_STATE_OPENED, &priv->state);
3241
3242         new_channels.params = priv->channels.params;
3243         new_channels.params.lro_en = enable;
3244
3245         if (!reset) {
3246                 priv->channels.params = new_channels.params;
3247                 err = mlx5e_modify_tirs_lro(priv);
3248                 goto out;
3249         }
3250
3251         err = mlx5e_open_channels(priv, &new_channels);
3252         if (err)
3253                 goto out;
3254
3255         mlx5e_switch_priv_channels(priv, &new_channels, mlx5e_modify_tirs_lro);
3256 out:
3257         mutex_unlock(&priv->state_lock);
3258         return err;
3259 }
3260
3261 static int set_feature_vlan_filter(struct net_device *netdev, bool enable)
3262 {
3263         struct mlx5e_priv *priv = netdev_priv(netdev);
3264
3265         if (enable)
3266                 mlx5e_enable_vlan_filter(priv);
3267         else
3268                 mlx5e_disable_vlan_filter(priv);
3269
3270         return 0;
3271 }
3272
3273 static int set_feature_tc_num_filters(struct net_device *netdev, bool enable)
3274 {
3275         struct mlx5e_priv *priv = netdev_priv(netdev);
3276
3277         if (!enable && mlx5e_tc_num_filters(priv)) {
3278                 netdev_err(netdev,
3279                            "Active offloaded tc filters, can't turn hw_tc_offload off\n");
3280                 return -EINVAL;
3281         }
3282
3283         return 0;
3284 }
3285
3286 static int set_feature_rx_all(struct net_device *netdev, bool enable)
3287 {
3288         struct mlx5e_priv *priv = netdev_priv(netdev);
3289         struct mlx5_core_dev *mdev = priv->mdev;
3290
3291         return mlx5_set_port_fcs(mdev, !enable);
3292 }
3293
3294 static int set_feature_rx_fcs(struct net_device *netdev, bool enable)
3295 {
3296         struct mlx5e_priv *priv = netdev_priv(netdev);
3297         int err;
3298
3299         mutex_lock(&priv->state_lock);
3300
3301         priv->channels.params.scatter_fcs_en = enable;
3302         err = mlx5e_modify_channels_scatter_fcs(&priv->channels, enable);
3303         if (err)
3304                 priv->channels.params.scatter_fcs_en = !enable;
3305
3306         mutex_unlock(&priv->state_lock);
3307
3308         return err;
3309 }
3310
3311 static int set_feature_rx_vlan(struct net_device *netdev, bool enable)
3312 {
3313         struct mlx5e_priv *priv = netdev_priv(netdev);
3314         int err = 0;
3315
3316         mutex_lock(&priv->state_lock);
3317
3318         priv->channels.params.vlan_strip_disable = !enable;
3319         if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
3320                 goto unlock;
3321
3322         err = mlx5e_modify_channels_vsd(&priv->channels, !enable);
3323         if (err)
3324                 priv->channels.params.vlan_strip_disable = enable;
3325
3326 unlock:
3327         mutex_unlock(&priv->state_lock);
3328
3329         return err;
3330 }
3331
3332 #ifdef CONFIG_RFS_ACCEL
3333 static int set_feature_arfs(struct net_device *netdev, bool enable)
3334 {
3335         struct mlx5e_priv *priv = netdev_priv(netdev);
3336         int err;
3337
3338         if (enable)
3339                 err = mlx5e_arfs_enable(priv);
3340         else
3341                 err = mlx5e_arfs_disable(priv);
3342
3343         return err;
3344 }
3345 #endif
3346
3347 static int mlx5e_handle_feature(struct net_device *netdev,
3348                                 netdev_features_t wanted_features,
3349                                 netdev_features_t feature,
3350                                 mlx5e_feature_handler feature_handler)
3351 {
3352         netdev_features_t changes = wanted_features ^ netdev->features;
3353         bool enable = !!(wanted_features & feature);
3354         int err;
3355
3356         if (!(changes & feature))
3357                 return 0;
3358
3359         err = feature_handler(netdev, enable);
3360         if (err) {
3361                 netdev_err(netdev, "%s feature %pNF failed, err %d\n",
3362                            enable ? "Enable" : "Disable", &feature, err);
3363                 return err;
3364         }
3365
3366         MLX5E_SET_FEATURE(netdev, feature, enable);
3367         return 0;
3368 }
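
/*
 * A minimal sketch of how a new feature bit would be wired in: a handler
 * matching the mlx5e_feature_handler typedef, chained from
 * mlx5e_set_features() below. NETIF_F_FOO, set_feature_foo() and
 * mlx5e_foo_configure() are hypothetical names, not driver API.
 *
 *	static int set_feature_foo(struct net_device *netdev, bool enable)
 *	{
 *		struct mlx5e_priv *priv = netdev_priv(netdev);
 *
 *		return mlx5e_foo_configure(priv, enable);
 *	}
 *
 *	err |= mlx5e_handle_feature(netdev, features, NETIF_F_FOO,
 *				    set_feature_foo);
 */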
3369
3370 static int mlx5e_set_features(struct net_device *netdev,
3371                               netdev_features_t features)
3372 {
3373         int err;
3374
3375         err  = mlx5e_handle_feature(netdev, features, NETIF_F_LRO,
3376                                     set_feature_lro);
3377         err |= mlx5e_handle_feature(netdev, features,
3378                                     NETIF_F_HW_VLAN_CTAG_FILTER,
3379                                     set_feature_vlan_filter);
3380         err |= mlx5e_handle_feature(netdev, features, NETIF_F_HW_TC,
3381                                     set_feature_tc_num_filters);
3382         err |= mlx5e_handle_feature(netdev, features, NETIF_F_RXALL,
3383                                     set_feature_rx_all);
3384         err |= mlx5e_handle_feature(netdev, features, NETIF_F_RXFCS,
3385                                     set_feature_rx_fcs);
3386         err |= mlx5e_handle_feature(netdev, features, NETIF_F_HW_VLAN_CTAG_RX,
3387                                     set_feature_rx_vlan);
3388 #ifdef CONFIG_RFS_ACCEL
3389         err |= mlx5e_handle_feature(netdev, features, NETIF_F_NTUPLE,
3390                                     set_feature_arfs);
3391 #endif
3392
3393         return err ? -EINVAL : 0;
3394 }
3395
3396 static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
3397 {
3398         struct mlx5e_priv *priv = netdev_priv(netdev);
3399         struct mlx5e_channels new_channels = {};
3400         int curr_mtu;
3401         int err = 0;
3402         bool reset;
3403
3404         mutex_lock(&priv->state_lock);
3405
3406         reset = !priv->channels.params.lro_en &&
3407                 (priv->channels.params.rq_wq_type !=
3408                  MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ);
3409
3410         reset = reset && test_bit(MLX5E_STATE_OPENED, &priv->state);
3411
3412         curr_mtu    = netdev->mtu;
3413         netdev->mtu = new_mtu;
3414
3415         if (!reset) {
3416                 mlx5e_set_dev_port_mtu(priv);
3417                 goto out;
3418         }
3419
3420         new_channels.params = priv->channels.params;
3421         err = mlx5e_open_channels(priv, &new_channels);
3422         if (err) {
3423                 netdev->mtu = curr_mtu;
3424                 goto out;
3425         }
3426
3427         mlx5e_switch_priv_channels(priv, &new_channels, mlx5e_set_dev_port_mtu);
3428
3429 out:
3430         mutex_unlock(&priv->state_lock);
3431         return err;
3432 }
3433
3434 int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr)
3435 {
3436         struct hwtstamp_config config;
3437         int err;
3438
3439         if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz))
3440                 return -EOPNOTSUPP;
3441
3442         if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
3443                 return -EFAULT;
3444
3445         /* TX HW timestamp */
3446         switch (config.tx_type) {
3447         case HWTSTAMP_TX_OFF:
3448         case HWTSTAMP_TX_ON:
3449                 break;
3450         default:
3451                 return -ERANGE;
3452         }
3453
3454         mutex_lock(&priv->state_lock);
3455         /* RX HW timestamp */
3456         switch (config.rx_filter) {
3457         case HWTSTAMP_FILTER_NONE:
3458                 /* Reset CQE compression to Admin default */
3459                 mlx5e_modify_rx_cqe_compression_locked(priv, priv->channels.params.rx_cqe_compress_def);
3460                 break;
3461         case HWTSTAMP_FILTER_ALL:
3462         case HWTSTAMP_FILTER_SOME:
3463         case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
3464         case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
3465         case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
3466         case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
3467         case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
3468         case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
3469         case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
3470         case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
3471         case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
3472         case HWTSTAMP_FILTER_PTP_V2_EVENT:
3473         case HWTSTAMP_FILTER_PTP_V2_SYNC:
3474         case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
3475         case HWTSTAMP_FILTER_NTP_ALL:
3476                 /* Disable CQE compression */
3477                 netdev_warn(priv->netdev, "Disabling cqe compression\n");
3478                 err = mlx5e_modify_rx_cqe_compression_locked(priv, false);
3479                 if (err) {
3480                         netdev_err(priv->netdev, "Failed disabling cqe compression err=%d\n", err);
3481                         mutex_unlock(&priv->state_lock);
3482                         return err;
3483                 }
3484                 config.rx_filter = HWTSTAMP_FILTER_ALL;
3485                 break;
3486         default:
3487                 mutex_unlock(&priv->state_lock);
3488                 return -ERANGE;
3489         }
3490
3491         memcpy(&priv->tstamp, &config, sizeof(config));
3492         mutex_unlock(&priv->state_lock);
3493
3494         return copy_to_user(ifr->ifr_data, &config,
3495                             sizeof(config)) ? -EFAULT : 0;
3496 }
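
/*
 * Illustrative userspace counterpart, a sketch rather than driver code:
 * enabling TX/RX timestamping via the standard SIOCSHWTSTAMP ioctl, which
 * lands in mlx5e_hwstamp_set() above. "eth0" and sock_fd are placeholders.
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_ALL,
 *	};
 *	struct ifreq ifr = {};
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (char *)&cfg;
 *	if (ioctl(sock_fd, SIOCSHWTSTAMP, &ifr) < 0)
 *		perror("SIOCSHWTSTAMP");
 */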
3497
3498 int mlx5e_hwstamp_get(struct mlx5e_priv *priv, struct ifreq *ifr)
3499 {
3500         struct hwtstamp_config *cfg = &priv->tstamp;
3501
3502         if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz))
3503                 return -EOPNOTSUPP;
3504
3505         return copy_to_user(ifr->ifr_data, cfg, sizeof(*cfg)) ? -EFAULT : 0;
3506 }
3507
3508 static int mlx5e_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
3509 {
3510         struct mlx5e_priv *priv = netdev_priv(dev);
3511
3512         switch (cmd) {
3513         case SIOCSHWTSTAMP:
3514                 return mlx5e_hwstamp_set(priv, ifr);
3515         case SIOCGHWTSTAMP:
3516                 return mlx5e_hwstamp_get(priv, ifr);
3517         default:
3518                 return -EOPNOTSUPP;
3519         }
3520 }
3521
3522 #ifdef CONFIG_MLX5_ESWITCH
3523 static int mlx5e_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
3524 {
3525         struct mlx5e_priv *priv = netdev_priv(dev);
3526         struct mlx5_core_dev *mdev = priv->mdev;
3527
3528         return mlx5_eswitch_set_vport_mac(mdev->priv.eswitch, vf + 1, mac);
3529 }
3530
3531 static int mlx5e_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos,
3532                              __be16 vlan_proto)
3533 {
3534         struct mlx5e_priv *priv = netdev_priv(dev);
3535         struct mlx5_core_dev *mdev = priv->mdev;
3536
3537         if (vlan_proto != htons(ETH_P_8021Q))
3538                 return -EPROTONOSUPPORT;
3539
3540         return mlx5_eswitch_set_vport_vlan(mdev->priv.eswitch, vf + 1,
3541                                            vlan, qos);
3542 }
3543
3544 static int mlx5e_set_vf_spoofchk(struct net_device *dev, int vf, bool setting)
3545 {
3546         struct mlx5e_priv *priv = netdev_priv(dev);
3547         struct mlx5_core_dev *mdev = priv->mdev;
3548
3549         return mlx5_eswitch_set_vport_spoofchk(mdev->priv.eswitch, vf + 1, setting);
3550 }
3551
3552 static int mlx5e_set_vf_trust(struct net_device *dev, int vf, bool setting)
3553 {
3554         struct mlx5e_priv *priv = netdev_priv(dev);
3555         struct mlx5_core_dev *mdev = priv->mdev;
3556
3557         return mlx5_eswitch_set_vport_trust(mdev->priv.eswitch, vf + 1, setting);
3558 }
3559
3560 static int mlx5e_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
3561                              int max_tx_rate)
3562 {
3563         struct mlx5e_priv *priv = netdev_priv(dev);
3564         struct mlx5_core_dev *mdev = priv->mdev;
3565
3566         return mlx5_eswitch_set_vport_rate(mdev->priv.eswitch, vf + 1,
3567                                            max_tx_rate, min_tx_rate);
3568 }
3569
3570 static int mlx5_vport_link2ifla(u8 esw_link)
3571 {
3572         switch (esw_link) {
3573         case MLX5_ESW_VPORT_ADMIN_STATE_DOWN:
3574                 return IFLA_VF_LINK_STATE_DISABLE;
3575         case MLX5_ESW_VPORT_ADMIN_STATE_UP:
3576                 return IFLA_VF_LINK_STATE_ENABLE;
3577         }
3578         return IFLA_VF_LINK_STATE_AUTO;
3579 }
3580
3581 static int mlx5_ifla_link2vport(u8 ifla_link)
3582 {
3583         switch (ifla_link) {
3584         case IFLA_VF_LINK_STATE_DISABLE:
3585                 return MLX5_ESW_VPORT_ADMIN_STATE_DOWN;
3586         case IFLA_VF_LINK_STATE_ENABLE:
3587                 return MLX5_ESW_VPORT_ADMIN_STATE_UP;
3588         }
3589         return MLX5_ESW_VPORT_ADMIN_STATE_AUTO;
3590 }
3591
3592 static int mlx5e_set_vf_link_state(struct net_device *dev, int vf,
3593                                    int link_state)
3594 {
3595         struct mlx5e_priv *priv = netdev_priv(dev);
3596         struct mlx5_core_dev *mdev = priv->mdev;
3597
3598         return mlx5_eswitch_set_vport_state(mdev->priv.eswitch, vf + 1,
3599                                             mlx5_ifla_link2vport(link_state));
3600 }
3601
3602 static int mlx5e_get_vf_config(struct net_device *dev,
3603                                int vf, struct ifla_vf_info *ivi)
3604 {
3605         struct mlx5e_priv *priv = netdev_priv(dev);
3606         struct mlx5_core_dev *mdev = priv->mdev;
3607         int err;
3608
3609         err = mlx5_eswitch_get_vport_config(mdev->priv.eswitch, vf + 1, ivi);
3610         if (err)
3611                 return err;
3612         ivi->linkstate = mlx5_vport_link2ifla(ivi->linkstate);
3613         return 0;
3614 }
3615
3616 static int mlx5e_get_vf_stats(struct net_device *dev,
3617                               int vf, struct ifla_vf_stats *vf_stats)
3618 {
3619         struct mlx5e_priv *priv = netdev_priv(dev);
3620         struct mlx5_core_dev *mdev = priv->mdev;
3621
3622         return mlx5_eswitch_get_vport_stats(mdev->priv.eswitch, vf + 1,
3623                                             vf_stats);
3624 }
3625 #endif
3626
3627 static void mlx5e_add_vxlan_port(struct net_device *netdev,
3628                                  struct udp_tunnel_info *ti)
3629 {
3630         struct mlx5e_priv *priv = netdev_priv(netdev);
3631
3632         if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
3633                 return;
3634
3635         if (!mlx5e_vxlan_allowed(priv->mdev))
3636                 return;
3637
3638         mlx5e_vxlan_queue_work(priv, ti->sa_family, be16_to_cpu(ti->port), 1);
3639 }
3640
3641 static void mlx5e_del_vxlan_port(struct net_device *netdev,
3642                                  struct udp_tunnel_info *ti)
3643 {
3644         struct mlx5e_priv *priv = netdev_priv(netdev);
3645
3646         if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
3647                 return;
3648
3649         if (!mlx5e_vxlan_allowed(priv->mdev))
3650                 return;
3651
3652         mlx5e_vxlan_queue_work(priv, ti->sa_family, be16_to_cpu(ti->port), 0);
3653 }
3654
3655 static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv,
3656                                                      struct sk_buff *skb,
3657                                                      netdev_features_t features)
3658 {
3659         struct udphdr *udph;
3660         u8 proto;
3661         u16 port;
3662
3663         switch (vlan_get_protocol(skb)) {
3664         case htons(ETH_P_IP):
3665                 proto = ip_hdr(skb)->protocol;
3666                 break;
3667         case htons(ETH_P_IPV6):
3668                 proto = ipv6_hdr(skb)->nexthdr;
3669                 break;
3670         default:
3671                 goto out;
3672         }
3673
3674         switch (proto) {
3675         case IPPROTO_GRE:
3676                 return features;
3677         case IPPROTO_UDP:
3678                 udph = udp_hdr(skb);
3679                 port = be16_to_cpu(udph->dest);
3680
3681                 /* Check whether the UDP dport is offloaded by the HW */
3682                 if (mlx5e_vxlan_lookup_port(priv, port))
3683                         return features;
3684         }
3685
3686 out:
3687         /* Disable CSUM and GSO if the UDP dport is not offloaded by the HW */
3688         return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
3689 }
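
/*
 * Worked example of the check above, ports illustrative: an encapsulated
 * skb towards UDP dport 4789 that was registered earlier through
 * ndo_udp_tunnel_add passes mlx5e_vxlan_lookup_port() and keeps its
 * offload features; for an unknown dport (say 6081, not handled here),
 * CSUM and GSO are masked out and the stack falls back to software.
 */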
3690
3691 static netdev_features_t mlx5e_features_check(struct sk_buff *skb,
3692                                               struct net_device *netdev,
3693                                               netdev_features_t features)
3694 {
3695         struct mlx5e_priv *priv = netdev_priv(netdev);
3696
3697         features = vlan_features_check(skb, features);
3698         features = vxlan_features_check(skb, features);
3699
3700 #ifdef CONFIG_MLX5_EN_IPSEC
3701         if (mlx5e_ipsec_feature_check(skb, netdev, features))
3702                 return features;
3703 #endif
3704
3705         /* Check whether the tunneled packet is offloaded by the HW */
3706         if (skb->encapsulation &&
3707             (features & NETIF_F_CSUM_MASK || features & NETIF_F_GSO_MASK))
3708                 return mlx5e_tunnel_features_check(priv, skb, features);
3709
3710         return features;
3711 }
3712
3713 static void mlx5e_tx_timeout(struct net_device *dev)
3714 {
3715         struct mlx5e_priv *priv = netdev_priv(dev);
3716         bool sched_work = false;
3717         int i;
3718
3719         netdev_err(dev, "TX timeout detected\n");
3720
3721         for (i = 0; i < priv->channels.num * priv->channels.params.num_tc; i++) {
3722                 struct mlx5e_txqsq *sq = priv->txq2sq[i];
3723
3724                 if (!netif_xmit_stopped(netdev_get_tx_queue(dev, i)))
3725                         continue;
3726                 sched_work = true;
3727                 clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
3728                 netdev_err(dev, "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x\n",
3729                            i, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc);
3730         }
3731
3732         if (sched_work && test_bit(MLX5E_STATE_OPENED, &priv->state))
3733                 schedule_work(&priv->tx_timeout_work);
3734 }
3735
3736 static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
3737 {
3738         struct mlx5e_priv *priv = netdev_priv(netdev);
3739         struct bpf_prog *old_prog;
3740         int err = 0;
3741         bool reset, was_opened;
3742         int i;
3743
3744         mutex_lock(&priv->state_lock);
3745
3746         if ((netdev->features & NETIF_F_LRO) && prog) {
3747                 netdev_warn(netdev, "can't set XDP while LRO is on, disable LRO first\n");
3748                 err = -EINVAL;
3749                 goto unlock;
3750         }
3751
3752         if ((netdev->features & NETIF_F_HW_ESP) && prog) {
3753                 netdev_warn(netdev, "can't set XDP with IPSec offload\n");
3754                 err = -EINVAL;
3755                 goto unlock;
3756         }
3757
3758         was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
3759         /* no need for full reset when exchanging programs */
3760         reset = (!priv->channels.params.xdp_prog || !prog);
3761
3762         if (was_opened && reset)
3763                 mlx5e_close_locked(netdev);
3764         if (was_opened && !reset) {
3765                 /* num_channels is invariant here, so we can take the
3766                  * batched reference right upfront.
3767                  */
3768                 prog = bpf_prog_add(prog, priv->channels.num);
3769                 if (IS_ERR(prog)) {
3770                         err = PTR_ERR(prog);
3771                         goto unlock;
3772                 }
3773         }
3774
3775         /* Exchange programs; we keep the extra prog reference we got
3776          * from the caller as long as we don't fail from this point on.
3777          */
3778         old_prog = xchg(&priv->channels.params.xdp_prog, prog);
3779         if (old_prog)
3780                 bpf_prog_put(old_prog);
3781
3782         if (reset) /* change RQ type according to priv->xdp_prog */
3783                 mlx5e_set_rq_params(priv->mdev, &priv->channels.params);
3784
3785         if (was_opened && reset)
3786                 mlx5e_open_locked(netdev);
3787
3788         if (!test_bit(MLX5E_STATE_OPENED, &priv->state) || reset)
3789                 goto unlock;
3790
3791         /* When exchanging programs without a reset, we update the ref
3792          * counts on behalf of the channels' RQs here.
3793          */
3794         for (i = 0; i < priv->channels.num; i++) {
3795                 struct mlx5e_channel *c = priv->channels.c[i];
3796
3797                 clear_bit(MLX5E_RQ_STATE_ENABLED, &c->rq.state);
3798                 napi_synchronize(&c->napi);
3799                 /* prevent mlx5e_poll_rx_cq from accessing rq->xdp_prog */
3800
3801                 old_prog = xchg(&c->rq.xdp_prog, prog);
3802
3803                 set_bit(MLX5E_RQ_STATE_ENABLED, &c->rq.state);
3804                 /* napi_schedule in case we have missed anything */
3805                 napi_schedule(&c->napi);
3806
3807                 if (old_prog)
3808                         bpf_prog_put(old_prog);
3809         }
3810
3811 unlock:
3812         mutex_unlock(&priv->state_lock);
3813         return err;
3814 }
3815
3816 static u32 mlx5e_xdp_query(struct net_device *dev)
3817 {
3818         struct mlx5e_priv *priv = netdev_priv(dev);
3819         const struct bpf_prog *xdp_prog;
3820         u32 prog_id = 0;
3821
3822         mutex_lock(&priv->state_lock);
3823         xdp_prog = priv->channels.params.xdp_prog;
3824         if (xdp_prog)
3825                 prog_id = xdp_prog->aux->id;
3826         mutex_unlock(&priv->state_lock);
3827
3828         return prog_id;
3829 }
3830
3831 static int mlx5e_xdp(struct net_device *dev, struct netdev_xdp *xdp)
3832 {
3833         switch (xdp->command) {
3834         case XDP_SETUP_PROG:
3835                 return mlx5e_xdp_set(dev, xdp->prog);
3836         case XDP_QUERY_PROG:
3837                 xdp->prog_id = mlx5e_xdp_query(dev);
3838                 xdp->prog_attached = !!xdp->prog_id;
3839                 return 0;
3840         default:
3841                 return -EINVAL;
3842         }
3843 }
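
/*
 * Illustrative usage, not part of the driver: attaching an XDP program,
 * which reaches mlx5e_xdp_set() as XDP_SETUP_PROG. Names are placeholders;
 * note that LRO must be disabled first (see the check in mlx5e_xdp_set()):
 *
 *	ethtool -K eth0 lro off
 *	ip link set dev eth0 xdp obj xdp_prog.o
 */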
3844
3845 #ifdef CONFIG_NET_POLL_CONTROLLER
3846 /* Fake "interrupt" called by netpoll (e.g. netconsole) to send skbs
3847  * without re-enabling interrupts.
3848  */
3849 static void mlx5e_netpoll(struct net_device *dev)
3850 {
3851         struct mlx5e_priv *priv = netdev_priv(dev);
3852         struct mlx5e_channels *chs = &priv->channels;
3854         int i;
3855
3856         for (i = 0; i < chs->num; i++)
3857                 napi_schedule(&chs->c[i]->napi);
3858 }
3859 #endif
3860
3861 static const struct net_device_ops mlx5e_netdev_ops = {
3862         .ndo_open                = mlx5e_open,
3863         .ndo_stop                = mlx5e_close,
3864         .ndo_start_xmit          = mlx5e_xmit,
3865         .ndo_setup_tc            = mlx5e_setup_tc,
3866         .ndo_select_queue        = mlx5e_select_queue,
3867         .ndo_get_stats64         = mlx5e_get_stats,
3868         .ndo_set_rx_mode         = mlx5e_set_rx_mode,
3869         .ndo_set_mac_address     = mlx5e_set_mac,
3870         .ndo_vlan_rx_add_vid     = mlx5e_vlan_rx_add_vid,
3871         .ndo_vlan_rx_kill_vid    = mlx5e_vlan_rx_kill_vid,
3872         .ndo_set_features        = mlx5e_set_features,
3873         .ndo_change_mtu          = mlx5e_change_mtu,
3874         .ndo_do_ioctl            = mlx5e_ioctl,
3875         .ndo_set_tx_maxrate      = mlx5e_set_tx_maxrate,
3876         .ndo_udp_tunnel_add      = mlx5e_add_vxlan_port,
3877         .ndo_udp_tunnel_del      = mlx5e_del_vxlan_port,
3878         .ndo_features_check      = mlx5e_features_check,
3879 #ifdef CONFIG_RFS_ACCEL
3880         .ndo_rx_flow_steer       = mlx5e_rx_flow_steer,
3881 #endif
3882         .ndo_tx_timeout          = mlx5e_tx_timeout,
3883         .ndo_xdp                 = mlx5e_xdp,
3884 #ifdef CONFIG_NET_POLL_CONTROLLER
3885         .ndo_poll_controller     = mlx5e_netpoll,
3886 #endif
3887 #ifdef CONFIG_MLX5_ESWITCH
3888         /* SRIOV E-Switch NDOs */
3889         .ndo_set_vf_mac          = mlx5e_set_vf_mac,
3890         .ndo_set_vf_vlan         = mlx5e_set_vf_vlan,
3891         .ndo_set_vf_spoofchk     = mlx5e_set_vf_spoofchk,
3892         .ndo_set_vf_trust        = mlx5e_set_vf_trust,
3893         .ndo_set_vf_rate         = mlx5e_set_vf_rate,
3894         .ndo_get_vf_config       = mlx5e_get_vf_config,
3895         .ndo_set_vf_link_state   = mlx5e_set_vf_link_state,
3896         .ndo_get_vf_stats        = mlx5e_get_vf_stats,
3897         .ndo_has_offload_stats   = mlx5e_has_offload_stats,
3898         .ndo_get_offload_stats   = mlx5e_get_offload_stats,
3899 #endif
3900 };
3901
3902 static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
3903 {
3904         if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
3905                 return -EOPNOTSUPP;
3906         if (!MLX5_CAP_GEN(mdev, eth_net_offloads) ||
3907             !MLX5_CAP_GEN(mdev, nic_flow_table) ||
3908             !MLX5_CAP_ETH(mdev, csum_cap) ||
3909             !MLX5_CAP_ETH(mdev, max_lso_cap) ||
3910             !MLX5_CAP_ETH(mdev, vlan_cap) ||
3911             !MLX5_CAP_ETH(mdev, rss_ind_tbl_cap) ||
3912             MLX5_CAP_FLOWTABLE(mdev,
3913                                flow_table_properties_nic_receive.max_ft_level)
3914                                < 3) {
3915                 mlx5_core_warn(mdev,
3916                                "Not creating net device, some required device capabilities are missing\n");
3917                 return -EOPNOTSUPP;
3918         }
3919         if (!MLX5_CAP_ETH(mdev, self_lb_en_modifiable))
3920                 mlx5_core_warn(mdev, "Self loop back prevention is not supported\n");
3921         if (!MLX5_CAP_GEN(mdev, cq_moderation))
3922                 mlx5_core_warn(mdev, "CQ moderation is not supported\n");
3923
3924         return 0;
3925 }
3926
3927 u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev)
3928 {
3929         int bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;
3930
3931         return bf_buf_size -
3932                sizeof(struct mlx5e_tx_wqe) +
3933                2 /*sizeof(mlx5e_tx_wqe.inline_hdr_start)*/;
3934 }
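
/*
 * Worked example with illustrative numbers: log_bf_reg_size = 11 gives a
 * 2048 byte blue-flame register, so bf_buf_size = 2048 / 2 = 1024. If
 * sizeof(struct mlx5e_tx_wqe) were 32, the maximal inline capability
 * would be 1024 - 32 + 2 = 994 bytes.
 */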
3935
3936 void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len,
3937                                    int num_channels)
3938 {
3939         int i;
3940
3941         for (i = 0; i < len; i++)
3942                 indirection_rqt[i] = i % num_channels;
3943 }
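
/*
 * Example: for len = 8 and num_channels = 3 the indirection table becomes
 * { 0, 1, 2, 0, 1, 2, 0, 1 }, i.e. a plain round-robin spread of RSS
 * buckets over the channels.
 */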
3944
3945 static int mlx5e_get_pci_bw(struct mlx5_core_dev *mdev, u32 *pci_bw)
3946 {
3947         enum pcie_link_width width;
3948         enum pci_bus_speed speed;
3949         int err = 0;
3950
3951         err = pcie_get_minimum_link(mdev->pdev, &speed, &width);
3952         if (err)
3953                 return err;
3954
3955         if (speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN)
3956                 return -EINVAL;
3957
3958         switch (speed) {
3959         case PCIE_SPEED_2_5GT:
3960                 *pci_bw = 2500 * width;
3961                 break;
3962         case PCIE_SPEED_5_0GT:
3963                 *pci_bw = 5000 * width;
3964                 break;
3965         case PCIE_SPEED_8_0GT:
3966                 *pci_bw = 8000 * width;
3967                 break;
3968         default:
3969                 return -EINVAL;
3970         }
3971
3972         return 0;
3973 }
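
/*
 * Worked example, numbers illustrative: a Gen3 x8 link reports
 * PCIE_SPEED_8_0GT and a width of 8, so *pci_bw = 8000 * 8 = 64000.
 * This is the raw per-direction signaling rate in Mb/s and does not
 * account for 128b/130b encoding overhead.
 */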
3974
3975 static bool cqe_compress_heuristic(u32 link_speed, u32 pci_bw)
3976 {
3977         return (link_speed && pci_bw &&
3978                 (pci_bw < 40000) && (pci_bw < link_speed));
3979 }
3980
3981 static bool hw_lro_heuristic(u32 link_speed, u32 pci_bw)
3982 {
3983         return !(link_speed && pci_bw &&
3984                  (pci_bw <= 16000) && (pci_bw < link_speed));
3985 }
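
/*
 * Example of the two heuristics with the illustrative Gen3 x8 numbers
 * above: for link_speed = 100000 (100 Gb/s) and pci_bw = 64000, the PCI
 * bus is slower than the link but not below 40000, so
 * cqe_compress_heuristic() returns false while hw_lro_heuristic()
 * returns true.
 */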
3986
3987 void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
3988 {
3989         params->rx_cq_period_mode = cq_period_mode;
3990
3991         params->rx_cq_moderation.pkts =
3992                 MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS;
3993         params->rx_cq_moderation.usec =
3994                         MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;
3995
3996         if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
3997                 params->rx_cq_moderation.usec =
3998                         MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE;
3999
4000         if (params->rx_am_enabled)
4001                 params->rx_cq_moderation =
4002                         mlx5e_am_get_def_profile(params->rx_cq_period_mode);
4003
4004         MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_BASED_MODER,
4005                         params->rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
4006 }
4007
4008 u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout)
4009 {
4010         int i;
4011
4012         /* The supported periods are organized in ascending order */
4013         for (i = 0; i < MLX5E_LRO_TIMEOUT_ARR_SIZE - 1; i++)
4014                 if (MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]) >= wanted_timeout)
4015                         break;
4016
4017         return MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]);
4018 }
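
/*
 * Example, supported periods are device dependent and illustrative here:
 * if the device reports { 8, 16, 32, 1024 } usec, a wanted_timeout of 20
 * selects 32, the first supported period that is not smaller; a
 * wanted_timeout above 1024 falls back to the last entry.
 */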
4019
4020 void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
4021                             struct mlx5e_params *params,
4022                             u16 max_channels)
4023 {
4024         u8 cq_period_mode = 0;
4025         u32 link_speed = 0;
4026         u32 pci_bw = 0;
4027
4028         params->num_channels = max_channels;
4029         params->num_tc       = 1;
4030
4031         mlx5e_get_max_linkspeed(mdev, &link_speed);
4032         mlx5e_get_pci_bw(mdev, &pci_bw);
4033         mlx5_core_dbg(mdev, "Max link speed = %d, PCI BW = %d\n",
4034                       link_speed, pci_bw);
4035
4036         /* SQ */
4037         params->log_sq_size = is_kdump_kernel() ?
4038                 MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE :
4039                 MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
4040
4041         /* set CQE compression */
4042         params->rx_cqe_compress_def = false;
4043         if (MLX5_CAP_GEN(mdev, cqe_compression) &&
4044             MLX5_CAP_GEN(mdev, vport_group_manager))
4045                 params->rx_cqe_compress_def = cqe_compress_heuristic(link_speed, pci_bw);
4046
4047         MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS, params->rx_cqe_compress_def);
4048
4049         /* RQ */
4050         mlx5e_set_rq_params(mdev, params);
4051
4052         /* HW LRO */
4053         /* TODO: && MLX5_CAP_ETH(mdev, lro_cap) */
4055         if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
4056                 params->lro_en = hw_lro_heuristic(link_speed, pci_bw);
4057         params->lro_timeout = mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT);
4058
4059         /* CQ moderation params */
4060         cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
4061                         MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
4062                         MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
4063         params->rx_am_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
4064         mlx5e_set_rx_cq_mode_params(params, cq_period_mode);
4065
4066         params->tx_cq_moderation.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
4067         params->tx_cq_moderation.pkts = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
4068
4069         /* TX inline */
4070         params->tx_max_inline = mlx5e_get_max_inline_cap(mdev);
4071         mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
4072         if (params->tx_min_inline_mode == MLX5_INLINE_MODE_NONE &&
4073             !MLX5_CAP_ETH(mdev, wqe_vlan_insert))
4074                 params->tx_min_inline_mode = MLX5_INLINE_MODE_L2;
4075
4076         /* RSS */
4077         params->rss_hfunc = ETH_RSS_HASH_XOR;
4078         netdev_rss_key_fill(params->toeplitz_hash_key, sizeof(params->toeplitz_hash_key));
4079         mlx5e_build_default_indir_rqt(params->indirection_rqt,
4080                                       MLX5E_INDIR_RQT_SIZE, max_channels);
4081 }
4082
4083 static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
4084                                         struct net_device *netdev,
4085                                         const struct mlx5e_profile *profile,
4086                                         void *ppriv)
4087 {
4088         struct mlx5e_priv *priv = netdev_priv(netdev);
4089
4090         priv->mdev        = mdev;
4091         priv->netdev      = netdev;
4092         priv->profile     = profile;
4093         priv->ppriv       = ppriv;
4094         priv->hard_mtu = MLX5E_ETH_HARD_MTU;
4095
4096         mlx5e_build_nic_params(mdev, &priv->channels.params, profile->max_nch(mdev));
4097
4098         mutex_init(&priv->state_lock);
4099
4100         INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
4101         INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
4102         INIT_WORK(&priv->tx_timeout_work, mlx5e_tx_timeout_work);
4103         INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
4104 }
4105
4106 static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
4107 {
4108         struct mlx5e_priv *priv = netdev_priv(netdev);
4109
4110         mlx5_query_nic_vport_mac_address(priv->mdev, 0, netdev->dev_addr);
4111         if (is_zero_ether_addr(netdev->dev_addr) &&
4112             !MLX5_CAP_GEN(priv->mdev, vport_group_manager)) {
4113                 eth_hw_addr_random(netdev);
4114                 mlx5_core_info(priv->mdev, "Assigned random MAC address %pM\n", netdev->dev_addr);
4115         }
4116 }
4117
4118 #if IS_ENABLED(CONFIG_NET_SWITCHDEV) && IS_ENABLED(CONFIG_MLX5_ESWITCH)
4119 static const struct switchdev_ops mlx5e_switchdev_ops = {
4120         .switchdev_port_attr_get        = mlx5e_attr_get,
4121 };
4122 #endif
4123
4124 static void mlx5e_build_nic_netdev(struct net_device *netdev)
4125 {
4126         struct mlx5e_priv *priv = netdev_priv(netdev);
4127         struct mlx5_core_dev *mdev = priv->mdev;
4128         bool fcs_supported;
4129         bool fcs_enabled;
4130
4131         SET_NETDEV_DEV(netdev, &mdev->pdev->dev);
4132
4133         netdev->netdev_ops = &mlx5e_netdev_ops;
4134
4135 #ifdef CONFIG_MLX5_CORE_EN_DCB
4136         if (MLX5_CAP_GEN(mdev, vport_group_manager) && MLX5_CAP_GEN(mdev, qos))
4137                 netdev->dcbnl_ops = &mlx5e_dcbnl_ops;
4138 #endif
4139
4140         netdev->watchdog_timeo    = 15 * HZ;
4141
4142         netdev->ethtool_ops       = &mlx5e_ethtool_ops;
4143
4144         netdev->vlan_features    |= NETIF_F_SG;
4145         netdev->vlan_features    |= NETIF_F_IP_CSUM;
4146         netdev->vlan_features    |= NETIF_F_IPV6_CSUM;
4147         netdev->vlan_features    |= NETIF_F_GRO;
4148         netdev->vlan_features    |= NETIF_F_TSO;
4149         netdev->vlan_features    |= NETIF_F_TSO6;
4150         netdev->vlan_features    |= NETIF_F_RXCSUM;
4151         netdev->vlan_features    |= NETIF_F_RXHASH;
4152
4153         if (MLX5_CAP_ETH(mdev, lro_cap))
4154                 netdev->vlan_features    |= NETIF_F_LRO;
4155
4156         netdev->hw_features       = netdev->vlan_features;
4157         netdev->hw_features      |= NETIF_F_HW_VLAN_CTAG_TX;
4158         netdev->hw_features      |= NETIF_F_HW_VLAN_CTAG_RX;
4159         netdev->hw_features      |= NETIF_F_HW_VLAN_CTAG_FILTER;
4160
4161         if (mlx5e_vxlan_allowed(mdev) || MLX5_CAP_ETH(mdev, tunnel_stateless_gre)) {
4162                 netdev->hw_features     |= NETIF_F_GSO_PARTIAL;
4163                 netdev->hw_enc_features |= NETIF_F_IP_CSUM;
4164                 netdev->hw_enc_features |= NETIF_F_IPV6_CSUM;
4165                 netdev->hw_enc_features |= NETIF_F_TSO;
4166                 netdev->hw_enc_features |= NETIF_F_TSO6;
4167                 netdev->hw_enc_features |= NETIF_F_GSO_PARTIAL;
4168         }
4169
4170         if (mlx5e_vxlan_allowed(mdev)) {
4171                 netdev->hw_features     |= NETIF_F_GSO_UDP_TUNNEL |
4172                                            NETIF_F_GSO_UDP_TUNNEL_CSUM;
4173                 netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL |
4174                                            NETIF_F_GSO_UDP_TUNNEL_CSUM;
4175                 netdev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
4176         }
4177
4178         if (MLX5_CAP_ETH(mdev, tunnel_stateless_gre)) {
4179                 netdev->hw_features     |= NETIF_F_GSO_GRE |
4180                                            NETIF_F_GSO_GRE_CSUM;
4181                 netdev->hw_enc_features |= NETIF_F_GSO_GRE |
4182                                            NETIF_F_GSO_GRE_CSUM;
4183                 netdev->gso_partial_features |= NETIF_F_GSO_GRE |
4184                                                 NETIF_F_GSO_GRE_CSUM;
4185         }
4186
4187         mlx5_query_port_fcs(mdev, &fcs_supported, &fcs_enabled);
4188
4189         if (fcs_supported)
4190                 netdev->hw_features |= NETIF_F_RXALL;
4191
4192         if (MLX5_CAP_ETH(mdev, scatter_fcs))
4193                 netdev->hw_features |= NETIF_F_RXFCS;
4194
4195         netdev->features          = netdev->hw_features;
4196         if (!priv->channels.params.lro_en)
4197                 netdev->features  &= ~NETIF_F_LRO;
4198
4199         if (fcs_enabled)
4200                 netdev->features  &= ~NETIF_F_RXALL;
4201
4202         if (!priv->channels.params.scatter_fcs_en)
4203                 netdev->features  &= ~NETIF_F_RXFCS;
4204
4205 #define FT_CAP(f) MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.f)
4206         if (FT_CAP(flow_modify_en) &&
4207             FT_CAP(modify_root) &&
4208             FT_CAP(identified_miss_table_mode) &&
4209             FT_CAP(flow_table_modify)) {
4210                 netdev->hw_features      |= NETIF_F_HW_TC;
4211 #ifdef CONFIG_RFS_ACCEL
4212                 netdev->hw_features      |= NETIF_F_NTUPLE;
4213 #endif
4214         }
4215
4216         netdev->features         |= NETIF_F_HIGHDMA;
4217
4218         netdev->priv_flags       |= IFF_UNICAST_FLT;
4219
4220         mlx5e_set_netdev_dev_addr(netdev);
4221
4222 #if IS_ENABLED(CONFIG_NET_SWITCHDEV) && IS_ENABLED(CONFIG_MLX5_ESWITCH)
4223         if (MLX5_VPORT_MANAGER(mdev))
4224                 netdev->switchdev_ops = &mlx5e_switchdev_ops;
4225 #endif
4226
4227         mlx5e_ipsec_build_netdev(priv);
4228 }
4229
4230 static void mlx5e_create_q_counter(struct mlx5e_priv *priv)
4231 {
4232         struct mlx5_core_dev *mdev = priv->mdev;
4233         int err;
4234
4235         err = mlx5_core_alloc_q_counter(mdev, &priv->q_counter);
4236         if (err) {
4237                 mlx5_core_warn(mdev, "alloc queue counter failed, %d\n", err);
4238                 priv->q_counter = 0;
4239         }
4240 }
4241
4242 static void mlx5e_destroy_q_counter(struct mlx5e_priv *priv)
4243 {
4244         if (!priv->q_counter)
4245                 return;
4246
4247         mlx5_core_dealloc_q_counter(priv->mdev, priv->q_counter);
4248 }
4249
4250 static void mlx5e_nic_init(struct mlx5_core_dev *mdev,
4251                            struct net_device *netdev,
4252                            const struct mlx5e_profile *profile,
4253                            void *ppriv)
4254 {
4255         struct mlx5e_priv *priv = netdev_priv(netdev);
4256         int err;
4257
4258         mlx5e_build_nic_netdev_priv(mdev, netdev, profile, ppriv);
4259         err = mlx5e_ipsec_init(priv);
4260         if (err)
4261                 mlx5_core_err(mdev, "IPSec initialization failed, %d\n", err);
4262         mlx5e_build_nic_netdev(netdev);
4263         mlx5e_vxlan_init(priv);
4264 }
4265
4266 static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
4267 {
4268         mlx5e_ipsec_cleanup(priv);
4269         mlx5e_vxlan_cleanup(priv);
4270
4271         if (priv->channels.params.xdp_prog)
4272                 bpf_prog_put(priv->channels.params.xdp_prog);
4273 }
4274
4275 static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
4276 {
4277         struct mlx5_core_dev *mdev = priv->mdev;
4278         int err;
4279
4280         err = mlx5e_create_indirect_rqt(priv);
4281         if (err)
4282                 return err;
4283
4284         err = mlx5e_create_direct_rqts(priv);
4285         if (err)
4286                 goto err_destroy_indirect_rqts;
4287
4288         err = mlx5e_create_indirect_tirs(priv);
4289         if (err)
4290                 goto err_destroy_direct_rqts;
4291
4292         err = mlx5e_create_direct_tirs(priv);
4293         if (err)
4294                 goto err_destroy_indirect_tirs;
4295
4296         err = mlx5e_create_flow_steering(priv);
4297         if (err) {
4298                 mlx5_core_warn(mdev, "create flow steering failed, %d\n", err);
4299                 goto err_destroy_direct_tirs;
4300         }
4301
4302         err = mlx5e_tc_init(priv);
4303         if (err)
4304                 goto err_destroy_flow_steering;
4305
4306         return 0;
4307
4308 err_destroy_flow_steering:
4309         mlx5e_destroy_flow_steering(priv);
4310 err_destroy_direct_tirs:
4311         mlx5e_destroy_direct_tirs(priv);
4312 err_destroy_indirect_tirs:
4313         mlx5e_destroy_indirect_tirs(priv);
4314 err_destroy_direct_rqts:
4315         mlx5e_destroy_direct_rqts(priv);
4316 err_destroy_indirect_rqts:
4317         mlx5e_destroy_rqt(priv, &priv->indir_rqt);
4318         return err;
4319 }
4320
4321 static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv)
4322 {
4323         mlx5e_tc_cleanup(priv);
4324         mlx5e_destroy_flow_steering(priv);
4325         mlx5e_destroy_direct_tirs(priv);
4326         mlx5e_destroy_indirect_tirs(priv);
4327         mlx5e_destroy_direct_rqts(priv);
4328         mlx5e_destroy_rqt(priv, &priv->indir_rqt);
4329 }
4330
4331 static int mlx5e_init_nic_tx(struct mlx5e_priv *priv)
4332 {
4333         int err;
4334
4335         err = mlx5e_create_tises(priv);
4336         if (err) {
4337                 mlx5_core_warn(priv->mdev, "create tises failed, %d\n", err);
4338                 return err;
4339         }
4340
4341 #ifdef CONFIG_MLX5_CORE_EN_DCB
4342         mlx5e_dcbnl_initialize(priv);
4343 #endif
4344         return 0;
4345 }
4346
4347 static void mlx5e_nic_enable(struct mlx5e_priv *priv)
4348 {
4349         struct net_device *netdev = priv->netdev;
4350         struct mlx5_core_dev *mdev = priv->mdev;
4351         u16 max_mtu;
4352
4353         mlx5e_init_l2_addr(priv);
4354
4355         /* Mark the link as currently not needed by the driver */
4356         if (!netif_running(netdev))
4357                 mlx5_set_port_admin_status(mdev, MLX5_PORT_DOWN);
4358
4359         /* MTU range: 68 - hw-specific max */
4360         netdev->min_mtu = ETH_MIN_MTU;
4361         mlx5_query_port_max_mtu(priv->mdev, &max_mtu, 1);
4362         netdev->max_mtu = MLX5E_HW2SW_MTU(priv, max_mtu);
4363         mlx5e_set_dev_port_mtu(priv);
4364
4365         mlx5_lag_add(mdev, netdev);
4366
4367         mlx5e_enable_async_events(priv);
4368
4369         if (MLX5_VPORT_MANAGER(priv->mdev))
4370                 mlx5e_register_vport_reps(priv);
4371
4372         if (netdev->reg_state != NETREG_REGISTERED)
4373                 return;
4374
4375         /* Device already registered: sync netdev system state */
4376         if (mlx5e_vxlan_allowed(mdev)) {
4377                 rtnl_lock();
4378                 udp_tunnel_get_rx_info(netdev);
4379                 rtnl_unlock();
4380         }
4381
4382         queue_work(priv->wq, &priv->set_rx_mode_work);
4383
4384         rtnl_lock();
4385         if (netif_running(netdev))
4386                 mlx5e_open(netdev);
4387         netif_device_attach(netdev);
4388         rtnl_unlock();
4389 }
4390
4391 static void mlx5e_nic_disable(struct mlx5e_priv *priv)
4392 {
4393         struct mlx5_core_dev *mdev = priv->mdev;
4394
4395         rtnl_lock();
4396         if (netif_running(priv->netdev))
4397                 mlx5e_close(priv->netdev);
4398         netif_device_detach(priv->netdev);
4399         rtnl_unlock();
4400
4401         queue_work(priv->wq, &priv->set_rx_mode_work);
4402
4403         if (MLX5_VPORT_MANAGER(priv->mdev))
4404                 mlx5e_unregister_vport_reps(priv);
4405
4406         mlx5e_disable_async_events(priv);
4407         mlx5_lag_remove(mdev);
4408 }
4409
4410 static const struct mlx5e_profile mlx5e_nic_profile = {
4411         .init              = mlx5e_nic_init,
4412         .cleanup           = mlx5e_nic_cleanup,
4413         .init_rx           = mlx5e_init_nic_rx,
4414         .cleanup_rx        = mlx5e_cleanup_nic_rx,
4415         .init_tx           = mlx5e_init_nic_tx,
4416         .cleanup_tx        = mlx5e_cleanup_nic_tx,
4417         .enable            = mlx5e_nic_enable,
4418         .disable           = mlx5e_nic_disable,
4419         .update_stats      = mlx5e_update_ndo_stats,
4420         .max_nch           = mlx5e_get_max_num_channels,
4421         .update_carrier    = mlx5e_update_carrier,
4422         .rx_handlers.handle_rx_cqe       = mlx5e_handle_rx_cqe,
4423         .rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
4424         .max_tc            = MLX5E_MAX_NUM_TC,
4425 };
4426
4427 /* mlx5e generic netdev management API (move to en_common.c) */
4428
4429 struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
4430                                        const struct mlx5e_profile *profile,
4431                                        void *ppriv)
4432 {
4433         int nch = profile->max_nch(mdev);
4434         struct net_device *netdev;
4435         struct mlx5e_priv *priv;
4436
4437         netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv),
4438                                     nch * profile->max_tc,
4439                                     nch);
4440         if (!netdev) {
4441                 mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n");
4442                 return NULL;
4443         }
4444
4445 #ifdef CONFIG_RFS_ACCEL
4446         netdev->rx_cpu_rmap = mdev->rmap;
4447 #endif
4448
4449         profile->init(mdev, netdev, profile, ppriv);
4450
4451         netif_carrier_off(netdev);
4452
4453         priv = netdev_priv(netdev);
4454
4455         priv->wq = create_singlethread_workqueue("mlx5e");
4456         if (!priv->wq)
4457                 goto err_cleanup_nic;
4458
4459         return netdev;
4460
4461 err_cleanup_nic:
4462         if (profile->cleanup)
4463                 profile->cleanup(priv);
4464         free_netdev(netdev);
4465
4466         return NULL;
4467 }
4468
4469 int mlx5e_attach_netdev(struct mlx5e_priv *priv)
4470 {
4471         struct mlx5_core_dev *mdev = priv->mdev;
4472         const struct mlx5e_profile *profile;
4473         int err;
4474
4475         profile = priv->profile;
4476         clear_bit(MLX5E_STATE_DESTROYING, &priv->state);
4477
4478         err = profile->init_tx(priv);
4479         if (err)
4480                 goto out;
4481
4482         err = mlx5e_open_drop_rq(mdev, &priv->drop_rq);
4483         if (err) {
4484                 mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
4485                 goto err_cleanup_tx;
4486         }
4487
4488         err = profile->init_rx(priv);
4489         if (err)
4490                 goto err_close_drop_rq;
4491
4492         mlx5e_create_q_counter(priv);
4493
4494         if (profile->enable)
4495                 profile->enable(priv);
4496
4497         return 0;
4498
4499 err_close_drop_rq:
4500         mlx5e_close_drop_rq(&priv->drop_rq);
4501
4502 err_cleanup_tx:
4503         profile->cleanup_tx(priv);
4504
4505 out:
4506         return err;
4507 }
4508
4509 void mlx5e_detach_netdev(struct mlx5e_priv *priv)
4510 {
4511         const struct mlx5e_profile *profile = priv->profile;
4512
4513         set_bit(MLX5E_STATE_DESTROYING, &priv->state);
4514
4515         if (profile->disable)
4516                 profile->disable(priv);
4517         flush_workqueue(priv->wq);
4518
4519         mlx5e_destroy_q_counter(priv);
4520         profile->cleanup_rx(priv);
4521         mlx5e_close_drop_rq(&priv->drop_rq);
4522         profile->cleanup_tx(priv);
4523         cancel_delayed_work_sync(&priv->update_stats_work);
4524 }
4525
4526 void mlx5e_destroy_netdev(struct mlx5e_priv *priv)
4527 {
4528         const struct mlx5e_profile *profile = priv->profile;
4529         struct net_device *netdev = priv->netdev;
4530
4531         destroy_workqueue(priv->wq);
4532         if (profile->cleanup)
4533                 profile->cleanup(priv);
4534         free_netdev(netdev);
4535 }
4536
4537 /* mlx5e_attach/mlx5e_detach scope should be limited to creating/destroying
4538  * hardware contexts and connecting them to the current netdev.
4539  */
4540 static int mlx5e_attach(struct mlx5_core_dev *mdev, void *vpriv)
4541 {
4542         struct mlx5e_priv *priv = vpriv;
4543         struct net_device *netdev = priv->netdev;
4544         int err;
4545
4546         if (netif_device_present(netdev))
4547                 return 0;
4548
4549         err = mlx5e_create_mdev_resources(mdev);
4550         if (err)
4551                 return err;
4552
4553         err = mlx5e_attach_netdev(priv);
4554         if (err) {
4555                 mlx5e_destroy_mdev_resources(mdev);
4556                 return err;
4557         }
4558
4559         return 0;
4560 }
4561
4562 static void mlx5e_detach(struct mlx5_core_dev *mdev, void *vpriv)
4563 {
4564         struct mlx5e_priv *priv = vpriv;
4565         struct net_device *netdev = priv->netdev;
4566
4567         if (!netif_device_present(netdev))
4568                 return;
4569
4570         mlx5e_detach_netdev(priv);
4571         mlx5e_destroy_mdev_resources(mdev);
4572 }
4573
4574 static void *mlx5e_add(struct mlx5_core_dev *mdev)
4575 {
4576         struct net_device *netdev;
4577         void *rpriv = NULL;
4578         void *priv;
4579         int err;
4580
4581         err = mlx5e_check_required_hca_cap(mdev);
4582         if (err)
4583                 return NULL;
4584
4585 #ifdef CONFIG_MLX5_ESWITCH
4586         if (MLX5_VPORT_MANAGER(mdev)) {
4587                 rpriv = mlx5e_alloc_nic_rep_priv(mdev);
4588                 if (!rpriv) {
4589                         mlx5_core_warn(mdev, "Failed to alloc NIC rep priv data\n");
4590                         return NULL;
4591                 }
4592         }
4593 #endif
4594
4595         netdev = mlx5e_create_netdev(mdev, &mlx5e_nic_profile, rpriv);
4596         if (!netdev) {
4597                 mlx5_core_err(mdev, "mlx5e_create_netdev failed\n");
4598                 goto err_free_rpriv;
4599         }
4600
4601         priv = netdev_priv(netdev);
4602
4603         err = mlx5e_attach(mdev, priv);
4604         if (err) {
4605                 mlx5_core_err(mdev, "mlx5e_attach failed, %d\n", err);
4606                 goto err_destroy_netdev;
4607         }
4608
4609         err = register_netdev(netdev);
4610         if (err) {
4611                 mlx5_core_err(mdev, "register_netdev failed, %d\n", err);
4612                 goto err_detach;
4613         }
4614
4615         return priv;
4616
4617 err_detach:
4618         mlx5e_detach(mdev, priv);
4619 err_destroy_netdev:
4620         mlx5e_destroy_netdev(priv);
4621 err_free_rpriv:
4622         kfree(rpriv);
4623         return NULL;
4624 }
4625
4626 static void mlx5e_remove(struct mlx5_core_dev *mdev, void *vpriv)
4627 {
4628         struct mlx5e_priv *priv = vpriv;
4629         void *ppriv = priv->ppriv;
4630
4631         unregister_netdev(priv->netdev);
4632         mlx5e_detach(mdev, vpriv);
4633         mlx5e_destroy_netdev(priv);
4634         kfree(ppriv);
4635 }
4636
4637 static void *mlx5e_get_netdev(void *vpriv)
4638 {
4639         struct mlx5e_priv *priv = vpriv;
4640
4641         return priv->netdev;
4642 }
4643
4644 static struct mlx5_interface mlx5e_interface = {
4645         .add       = mlx5e_add,
4646         .remove    = mlx5e_remove,
4647         .attach    = mlx5e_attach,
4648         .detach    = mlx5e_detach,
4649         .event     = mlx5e_async_event,
4650         .protocol  = MLX5_INTERFACE_PROTOCOL_ETH,
4651         .get_dev   = mlx5e_get_netdev,
4652 };
4653
4654 void mlx5e_init(void)
4655 {
4656         mlx5e_ipsec_build_inverse_table();
4657         mlx5e_build_ptys2ethtool_map();
4658         mlx5_register_interface(&mlx5e_interface);
4659 }
4660
4661 void mlx5e_cleanup(void)
4662 {
4663         mlx5_unregister_interface(&mlx5e_interface);
4664 }