net/mlx5: DR, Support csum recalculation flow table on SFs
[linux-2.6-microblaze.git] / drivers / net / ethernet / mellanox / mlx5 / core / steering / dr_domain.c
1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2019 Mellanox Technologies. */
3
4 #include <linux/mlx5/eswitch.h>
5 #include "dr_types.h"
6
/* SW steering is supported on a domain if the device reports sw_owner for
 * that domain type, or sw_owner_v2 with a steering format version this
 * driver handles (ConnectX-6DX and earlier).
 */
#define DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, dmn_type)  \
	((dmn)->info.caps.dmn_type##_sw_owner ||        \
	 ((dmn)->info.caps.dmn_type##_sw_owner_v2 &&    \
	  (dmn)->info.caps.sw_format_ver <= MLX5_STEERING_FORMAT_CONNECTX_6DX))
11
12 static void dr_domain_init_csum_recalc_fts(struct mlx5dr_domain *dmn)
13 {
14         /* Per vport cached FW FT for checksum recalculation, this
15          * recalculation is needed due to a HW bug in STEv0.
16          */
17         xa_init(&dmn->csum_fts_xa);
18 }
19
20 static void dr_domain_uninit_csum_recalc_fts(struct mlx5dr_domain *dmn)
21 {
22         struct mlx5dr_fw_recalc_cs_ft *recalc_cs_ft;
23         unsigned long i;
24
25         xa_for_each(&dmn->csum_fts_xa, i, recalc_cs_ft) {
26                 if (recalc_cs_ft)
27                         mlx5dr_fw_destroy_recalc_cs_ft(dmn, recalc_cs_ft);
28         }
29
30         xa_destroy(&dmn->csum_fts_xa);
31 }
32
33 int mlx5dr_domain_get_recalc_cs_ft_addr(struct mlx5dr_domain *dmn,
34                                         u16 vport_num,
35                                         u64 *rx_icm_addr)
36 {
37         struct mlx5dr_fw_recalc_cs_ft *recalc_cs_ft;
38         int ret;
39
40         recalc_cs_ft = xa_load(&dmn->csum_fts_xa, vport_num);
41         if (!recalc_cs_ft) {
42                 /* Table hasn't been created yet */
43                 recalc_cs_ft = mlx5dr_fw_create_recalc_cs_ft(dmn, vport_num);
44                 if (!recalc_cs_ft)
45                         return -EINVAL;
46
47                 ret = xa_err(xa_store(&dmn->csum_fts_xa, vport_num,
48                                       recalc_cs_ft, GFP_KERNEL));
49                 if (ret)
50                         return ret;
51         }
52
53         *rx_icm_addr = recalc_cs_ft->rx_icm_addr;
54
55         return 0;
56 }
57
/* Allocate all HW/SW resources a domain needs: STE context, PD, UAR page,
 * STE and modify-action ICM pools, and the send ring. On any failure the
 * goto chain unwinds everything acquired so far, in reverse order.
 *
 * Returns 0 on success or a negative errno.
 */
static int dr_domain_init_resources(struct mlx5dr_domain *dmn)
{
	int ret;

	/* Pick the STE format handlers matching the device's steering
	 * format version; NULL means this device isn't supported.
	 */
	dmn->ste_ctx = mlx5dr_ste_get_ctx(dmn->info.caps.sw_format_ver);
	if (!dmn->ste_ctx) {
		mlx5dr_err(dmn, "SW Steering on this device is unsupported\n");
		return -EOPNOTSUPP;
	}

	ret = mlx5_core_alloc_pd(dmn->mdev, &dmn->pdn);
	if (ret) {
		mlx5dr_err(dmn, "Couldn't allocate PD, ret: %d", ret);
		return ret;
	}

	dmn->uar = mlx5_get_uars_page(dmn->mdev);
	if (!dmn->uar) {
		mlx5dr_err(dmn, "Couldn't allocate UAR\n");
		ret = -ENOMEM;
		goto clean_pd;
	}

	dmn->ste_icm_pool = mlx5dr_icm_pool_create(dmn, DR_ICM_TYPE_STE);
	if (!dmn->ste_icm_pool) {
		mlx5dr_err(dmn, "Couldn't get icm memory\n");
		ret = -ENOMEM;
		goto clean_uar;
	}

	dmn->action_icm_pool = mlx5dr_icm_pool_create(dmn, DR_ICM_TYPE_MODIFY_ACTION);
	if (!dmn->action_icm_pool) {
		mlx5dr_err(dmn, "Couldn't get action icm memory\n");
		ret = -ENOMEM;
		goto free_ste_icm_pool;
	}

	ret = mlx5dr_send_ring_alloc(dmn);
	if (ret) {
		mlx5dr_err(dmn, "Couldn't create send-ring\n");
		goto free_action_icm_pool;
	}

	return 0;

	/* Unwind in reverse acquisition order */
free_action_icm_pool:
	mlx5dr_icm_pool_destroy(dmn->action_icm_pool);
free_ste_icm_pool:
	mlx5dr_icm_pool_destroy(dmn->ste_icm_pool);
clean_uar:
	mlx5_put_uars_page(dmn->mdev, dmn->uar);
clean_pd:
	mlx5_core_dealloc_pd(dmn->mdev, dmn->pdn);

	return ret;
}
114
/* Release everything dr_domain_init_resources() acquired, in reverse
 * order of acquisition.
 */
static void dr_domain_uninit_resources(struct mlx5dr_domain *dmn)
{
	mlx5dr_send_ring_free(dmn, dmn->send_ring);
	mlx5dr_icm_pool_destroy(dmn->action_icm_pool);
	mlx5dr_icm_pool_destroy(dmn->ste_icm_pool);
	mlx5_put_uars_page(dmn->mdev, dmn->uar);
	mlx5_core_dealloc_pd(dmn->mdev, dmn->pdn);
}
123
124 static int dr_domain_query_vport(struct mlx5dr_domain *dmn,
125                                  u16 vport_number,
126                                  struct mlx5dr_cmd_vport_cap *vport_caps)
127 {
128         u16 cmd_vport = vport_number;
129         bool other_vport = true;
130         int ret;
131
132         if (dmn->info.caps.is_ecpf && vport_number == MLX5_VPORT_ECPF) {
133                 other_vport = false;
134                 cmd_vport = 0;
135         }
136
137         ret = mlx5dr_cmd_query_esw_vport_context(dmn->mdev,
138                                                  other_vport,
139                                                  cmd_vport,
140                                                  &vport_caps->icm_address_rx,
141                                                  &vport_caps->icm_address_tx);
142         if (ret)
143                 return ret;
144
145         ret = mlx5dr_cmd_query_gvmi(dmn->mdev,
146                                     other_vport,
147                                     cmd_vport,
148                                     &vport_caps->vport_gvmi);
149         if (ret)
150                 return ret;
151
152         vport_caps->num = vport_number;
153         vport_caps->vhca_gvmi = dmn->info.caps.gvmi;
154
155         return 0;
156 }
157
158 static int dr_domain_query_esw_mngr(struct mlx5dr_domain *dmn)
159 {
160         return dr_domain_query_vport(dmn,
161                                      dmn->info.caps.is_ecpf ? MLX5_VPORT_ECPF : 0,
162                                      &dmn->info.caps.esw_manager_vport_caps);
163 }
164
165 static int dr_domain_query_vports(struct mlx5dr_domain *dmn)
166 {
167         struct mlx5dr_esw_caps *esw_caps = &dmn->info.caps.esw_caps;
168         struct mlx5dr_cmd_vport_cap *wire_vport;
169         int vport;
170         int ret;
171
172         ret = dr_domain_query_esw_mngr(dmn);
173         if (ret)
174                 return ret;
175
176         /* Query vports (except wire vport) */
177         for (vport = 0; vport < dmn->info.caps.num_esw_ports - 1; vport++) {
178                 ret = dr_domain_query_vport(dmn,
179                                             vport,
180                                             &dmn->info.caps.vports_caps[vport]);
181                 if (ret)
182                         return ret;
183         }
184
185         /* Last vport is the wire port */
186         wire_vport = &dmn->info.caps.vports_caps[vport];
187         wire_vport->num = MLX5_VPORT_UPLINK;
188         wire_vport->icm_address_rx = esw_caps->uplink_icm_address_rx;
189         wire_vport->icm_address_tx = esw_caps->uplink_icm_address_tx;
190         wire_vport->vport_gvmi = 0;
191         wire_vport->vhca_gvmi = dmn->info.caps.gvmi;
192
193         return 0;
194 }
195
196 static int dr_domain_query_fdb_caps(struct mlx5_core_dev *mdev,
197                                     struct mlx5dr_domain *dmn)
198 {
199         int ret;
200
201         if (!dmn->info.caps.eswitch_manager)
202                 return -EOPNOTSUPP;
203
204         ret = mlx5dr_cmd_query_esw_caps(mdev, &dmn->info.caps.esw_caps);
205         if (ret)
206                 return ret;
207
208         dmn->info.caps.fdb_sw_owner = dmn->info.caps.esw_caps.sw_owner;
209         dmn->info.caps.fdb_sw_owner_v2 = dmn->info.caps.esw_caps.sw_owner_v2;
210         dmn->info.caps.esw_rx_drop_address = dmn->info.caps.esw_caps.drop_icm_address_rx;
211         dmn->info.caps.esw_tx_drop_address = dmn->info.caps.esw_caps.drop_icm_address_tx;
212
213         dmn->info.caps.vports_caps = kcalloc(dmn->info.caps.num_esw_ports,
214                                              sizeof(dmn->info.caps.vports_caps[0]),
215                                              GFP_KERNEL);
216         if (!dmn->info.caps.vports_caps)
217                 return -ENOMEM;
218
219         ret = dr_domain_query_vports(dmn);
220         if (ret) {
221                 mlx5dr_err(dmn, "Failed to query vports caps (err: %d)", ret);
222                 goto free_vports_caps;
223         }
224
225         dmn->info.caps.num_vports = dmn->info.caps.num_esw_ports - 1;
226
227         return 0;
228
229 free_vports_caps:
230         kfree(dmn->info.caps.vports_caps);
231         dmn->info.caps.vports_caps = NULL;
232         return ret;
233 }
234
235 static int dr_domain_caps_init(struct mlx5_core_dev *mdev,
236                                struct mlx5dr_domain *dmn)
237 {
238         struct mlx5dr_cmd_vport_cap *vport_cap;
239         int ret;
240
241         if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) {
242                 mlx5dr_err(dmn, "Failed to allocate domain, bad link type\n");
243                 return -EOPNOTSUPP;
244         }
245
246         dmn->info.caps.num_esw_ports = mlx5_eswitch_get_total_vports(mdev);
247
248         ret = mlx5dr_cmd_query_device(mdev, &dmn->info.caps);
249         if (ret)
250                 return ret;
251
252         ret = dr_domain_query_fdb_caps(mdev, dmn);
253         if (ret)
254                 return ret;
255
256         switch (dmn->type) {
257         case MLX5DR_DOMAIN_TYPE_NIC_RX:
258                 if (!DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, rx))
259                         return -ENOTSUPP;
260
261                 dmn->info.supp_sw_steering = true;
262                 dmn->info.rx.type = DR_DOMAIN_NIC_TYPE_RX;
263                 dmn->info.rx.default_icm_addr = dmn->info.caps.nic_rx_drop_address;
264                 dmn->info.rx.drop_icm_addr = dmn->info.caps.nic_rx_drop_address;
265                 break;
266         case MLX5DR_DOMAIN_TYPE_NIC_TX:
267                 if (!DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, tx))
268                         return -ENOTSUPP;
269
270                 dmn->info.supp_sw_steering = true;
271                 dmn->info.tx.type = DR_DOMAIN_NIC_TYPE_TX;
272                 dmn->info.tx.default_icm_addr = dmn->info.caps.nic_tx_allow_address;
273                 dmn->info.tx.drop_icm_addr = dmn->info.caps.nic_tx_drop_address;
274                 break;
275         case MLX5DR_DOMAIN_TYPE_FDB:
276                 if (!dmn->info.caps.eswitch_manager)
277                         return -ENOTSUPP;
278
279                 if (!DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, fdb))
280                         return -ENOTSUPP;
281
282                 dmn->info.rx.type = DR_DOMAIN_NIC_TYPE_RX;
283                 dmn->info.tx.type = DR_DOMAIN_NIC_TYPE_TX;
284                 vport_cap = &dmn->info.caps.esw_manager_vport_caps;
285
286                 dmn->info.supp_sw_steering = true;
287                 dmn->info.tx.default_icm_addr = vport_cap->icm_address_tx;
288                 dmn->info.rx.default_icm_addr = vport_cap->icm_address_rx;
289                 dmn->info.rx.drop_icm_addr = dmn->info.caps.esw_rx_drop_address;
290                 dmn->info.tx.drop_icm_addr = dmn->info.caps.esw_tx_drop_address;
291                 break;
292         default:
293                 mlx5dr_err(dmn, "Invalid domain\n");
294                 ret = -EINVAL;
295                 break;
296         }
297
298         return ret;
299 }
300
/* Free what dr_domain_query_fdb_caps() allocated; kfree(NULL) is a no-op
 * for domains that never populated vports_caps.
 */
static void dr_domain_caps_uninit(struct mlx5dr_domain *dmn)
{
	kfree(dmn->info.caps.vports_caps);
}
305
306 struct mlx5dr_domain *
307 mlx5dr_domain_create(struct mlx5_core_dev *mdev, enum mlx5dr_domain_type type)
308 {
309         struct mlx5dr_domain *dmn;
310         int ret;
311
312         if (type > MLX5DR_DOMAIN_TYPE_FDB)
313                 return NULL;
314
315         dmn = kzalloc(sizeof(*dmn), GFP_KERNEL);
316         if (!dmn)
317                 return NULL;
318
319         dmn->mdev = mdev;
320         dmn->type = type;
321         refcount_set(&dmn->refcount, 1);
322         mutex_init(&dmn->info.rx.mutex);
323         mutex_init(&dmn->info.tx.mutex);
324
325         if (dr_domain_caps_init(mdev, dmn)) {
326                 mlx5dr_err(dmn, "Failed init domain, no caps\n");
327                 goto free_domain;
328         }
329
330         dmn->info.max_log_action_icm_sz = DR_CHUNK_SIZE_4K;
331         dmn->info.max_log_sw_icm_sz = min_t(u32, DR_CHUNK_SIZE_1024K,
332                                             dmn->info.caps.log_icm_size);
333
334         if (!dmn->info.supp_sw_steering) {
335                 mlx5dr_err(dmn, "SW steering is not supported\n");
336                 goto uninit_caps;
337         }
338
339         /* Allocate resources */
340         ret = dr_domain_init_resources(dmn);
341         if (ret) {
342                 mlx5dr_err(dmn, "Failed init domain resources\n");
343                 goto uninit_caps;
344         }
345
346         dr_domain_init_csum_recalc_fts(dmn);
347
348         return dmn;
349
350 uninit_caps:
351         dr_domain_caps_uninit(dmn);
352 free_domain:
353         kfree(dmn);
354         return NULL;
355 }
356
357 /* Assure synchronization of the device steering tables with updates made by SW
358  * insertion.
359  */
360 int mlx5dr_domain_sync(struct mlx5dr_domain *dmn, u32 flags)
361 {
362         int ret = 0;
363
364         if (flags & MLX5DR_DOMAIN_SYNC_FLAGS_SW) {
365                 mlx5dr_domain_lock(dmn);
366                 ret = mlx5dr_send_ring_force_drain(dmn);
367                 mlx5dr_domain_unlock(dmn);
368                 if (ret) {
369                         mlx5dr_err(dmn, "Force drain failed flags: %d, ret: %d\n",
370                                    flags, ret);
371                         return ret;
372                 }
373         }
374
375         if (flags & MLX5DR_DOMAIN_SYNC_FLAGS_HW)
376                 ret = mlx5dr_cmd_sync_steering(dmn->mdev);
377
378         return ret;
379 }
380
/* Tear down a domain created by mlx5dr_domain_create().
 *
 * Returns -EBUSY if anyone else still holds a reference; 0 on success.
 */
int mlx5dr_domain_destroy(struct mlx5dr_domain *dmn)
{
	if (refcount_read(&dmn->refcount) > 1)
		return -EBUSY;

	/* make sure resources are not used by the hardware */
	mlx5dr_cmd_sync_steering(dmn->mdev);
	dr_domain_uninit_csum_recalc_fts(dmn);
	dr_domain_uninit_resources(dmn);
	dr_domain_caps_uninit(dmn);
	mutex_destroy(&dmn->info.tx.mutex);
	mutex_destroy(&dmn->info.rx.mutex);
	kfree(dmn);
	return 0;
}
396
/* Replace this domain's peer domain (used for cross-domain steering),
 * dropping the reference on the old peer and taking one on the new.
 * @peer_dmn may be NULL to clear the peer.
 */
void mlx5dr_domain_set_peer(struct mlx5dr_domain *dmn,
			    struct mlx5dr_domain *peer_dmn)
{
	mlx5dr_domain_lock(dmn);

	if (dmn->peer_dmn)
		refcount_dec(&dmn->peer_dmn->refcount);

	dmn->peer_dmn = peer_dmn;

	if (dmn->peer_dmn)
		refcount_inc(&dmn->peer_dmn->refcount);

	mlx5dr_domain_unlock(dmn);
}