1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2019 Mellanox Technologies. */
4 #include <linux/mlx5/eswitch.h>
/* True when SW steering may own steering for this domain type on this
 * device: either the v1 sw_owner capability is set, or sw_owner_v2 is
 * set and the device's steering format version is no newer than
 * ConnectX-6DX.  dmn_type is token-pasted, so callers pass rx/tx/fdb.
 */
7 #define DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, dmn_type) \
8 ((dmn)->info.caps.dmn_type##_sw_owner || \
9 ((dmn)->info.caps.dmn_type##_sw_owner_v2 && \
10 (dmn)->info.caps.sw_format_ver <= MLX5_STEERING_FORMAT_CONNECTX_6DX))
/* Initialize the per-vport cache (xarray) of FW flow tables used for
 * checksum recalculation.  Entries are created lazily by
 * mlx5dr_domain_get_recalc_cs_ft_addr() and torn down by
 * dr_domain_uninit_csum_recalc_fts().
 */
12 static void dr_domain_init_csum_recalc_fts(struct mlx5dr_domain *dmn)
14 /* Per vport cached FW FT for checksum recalculation, this
15 * recalculation is needed due to a HW bug in STEv0.
 */
17 xa_init(&dmn->csum_fts_xa);
/* Destroy every cached recalc-checksum FW flow table and release the
 * xarray.  Counterpart of dr_domain_init_csum_recalc_fts().
 */
20 static void dr_domain_uninit_csum_recalc_fts(struct mlx5dr_domain *dmn)
22 struct mlx5dr_fw_recalc_cs_ft *recalc_cs_ft;
/* Walk all cached entries; each one is freed via the FW helper. */
25 xa_for_each(&dmn->csum_fts_xa, i, recalc_cs_ft) {
27 mlx5dr_fw_destroy_recalc_cs_ft(dmn, recalc_cs_ft);
30 xa_destroy(&dmn->csum_fts_xa);
/* Look up (or lazily create and cache) the recalc-checksum FW flow
 * table for @vport_num and report its RX ICM address via *rx_icm_addr.
 * Returns 0 on success, negative errno otherwise (from xa_err() on a
 * failed cache store; other failure paths are elided in this chunk).
 */
33 int mlx5dr_domain_get_recalc_cs_ft_addr(struct mlx5dr_domain *dmn,
37 struct mlx5dr_fw_recalc_cs_ft *recalc_cs_ft;
/* Fast path: reuse a previously created table for this vport. */
40 recalc_cs_ft = xa_load(&dmn->csum_fts_xa, vport_num);
42 /* Table hasn't been created yet */
43 recalc_cs_ft = mlx5dr_fw_create_recalc_cs_ft(dmn, vport_num);
/* Cache it; xa_store may fail (e.g. allocation), hence xa_err(). */
47 ret = xa_err(xa_store(&dmn->csum_fts_xa, vport_num,
48 recalc_cs_ft, GFP_KERNEL));
53 *rx_icm_addr = recalc_cs_ft->rx_icm_addr;
/* Acquire all HW/SW resources a domain needs, in order:
 *   STE context (by steering format version) -> PD -> UAR page ->
 *   STE ICM pool -> modify-action ICM pool -> send ring.
 * On any failure the goto labels below unwind in reverse order of
 * acquisition.  Returns 0 on success, negative errno on failure
 * (several error-branch lines are elided in this chunk).
 */
58 static int dr_domain_init_resources(struct mlx5dr_domain *dmn)
/* No STE context means this device's format version is unsupported. */
62 dmn->ste_ctx = mlx5dr_ste_get_ctx(dmn->info.caps.sw_format_ver);
64 mlx5dr_err(dmn, "SW Steering on this device is unsupported\n");
68 ret = mlx5_core_alloc_pd(dmn->mdev, &dmn->pdn);
70 mlx5dr_err(dmn, "Couldn't allocate PD, ret: %d", ret);
74 dmn->uar = mlx5_get_uars_page(dmn->mdev);
76 mlx5dr_err(dmn, "Couldn't allocate UAR\n");
81 dmn->ste_icm_pool = mlx5dr_icm_pool_create(dmn, DR_ICM_TYPE_STE);
82 if (!dmn->ste_icm_pool) {
83 mlx5dr_err(dmn, "Couldn't get icm memory\n");
88 dmn->action_icm_pool = mlx5dr_icm_pool_create(dmn, DR_ICM_TYPE_MODIFY_ACTION);
89 if (!dmn->action_icm_pool) {
90 mlx5dr_err(dmn, "Couldn't get action icm memory\n");
92 goto free_ste_icm_pool;
95 ret = mlx5dr_send_ring_alloc(dmn);
97 mlx5dr_err(dmn, "Couldn't create send-ring\n");
98 goto free_action_icm_pool;
/* Error unwind: release in reverse order of acquisition. */
103 free_action_icm_pool:
104 mlx5dr_icm_pool_destroy(dmn->action_icm_pool);
106 mlx5dr_icm_pool_destroy(dmn->ste_icm_pool);
108 mlx5_put_uars_page(dmn->mdev, dmn->uar);
110 mlx5_core_dealloc_pd(dmn->mdev, dmn->pdn);
/* Release everything dr_domain_init_resources() acquired, in reverse
 * order: send ring, action ICM pool, STE ICM pool, UAR page, PD.
 */
115 static void dr_domain_uninit_resources(struct mlx5dr_domain *dmn)
117 mlx5dr_send_ring_free(dmn, dmn->send_ring);
118 mlx5dr_icm_pool_destroy(dmn->action_icm_pool);
119 mlx5dr_icm_pool_destroy(dmn->ste_icm_pool);
120 mlx5_put_uars_page(dmn->mdev, dmn->uar);
121 mlx5_core_dealloc_pd(dmn->mdev, dmn->pdn);
/* Fill @vport_caps for @vport_number: query the eswitch vport context
 * for the RX/TX ICM addresses and query the GVMI for the vport.
 * other_vport defaults to true (query on behalf of another vport);
 * NOTE(review): the elided branch for is_ecpf/MLX5_VPORT_ECPF most
 * likely adjusts cmd_vport/other_vport -- confirm against full source.
 */
124 static int dr_domain_query_vport(struct mlx5dr_domain *dmn,
126 struct mlx5dr_cmd_vport_cap *vport_caps)
128 u16 cmd_vport = vport_number;
129 bool other_vport = true;
132 if (dmn->info.caps.is_ecpf && vport_number == MLX5_VPORT_ECPF) {
137 ret = mlx5dr_cmd_query_esw_vport_context(dmn->mdev,
140 &vport_caps->icm_address_rx,
141 &vport_caps->icm_address_tx);
145 ret = mlx5dr_cmd_query_gvmi(dmn->mdev,
148 &vport_caps->vport_gvmi);
/* Record the queried vport number and the local vhca GVMI. */
152 vport_caps->num = vport_number;
153 vport_caps->vhca_gvmi = dmn->info.caps.gvmi;
/* Query the eswitch-manager vport caps: vport ECPF on ECPF devices,
 * vport 0 otherwise.  Result is stored in caps.esw_manager_vport_caps.
 */
158 static int dr_domain_query_esw_mngr(struct mlx5dr_domain *dmn)
160 return dr_domain_query_vport(dmn,
161 dmn->info.caps.is_ecpf ? MLX5_VPORT_ECPF : 0,
162 &dmn->info.caps.esw_manager_vport_caps);
/* Populate caps.vports_caps: first the eswitch-manager vport, then each
 * regular vport (indices 0 .. num_esw_ports-2), and finally the last
 * slot is filled in manually as the wire/uplink port using the ICM
 * addresses obtained from the eswitch caps.
 */
165 static int dr_domain_query_vports(struct mlx5dr_domain *dmn)
167 struct mlx5dr_esw_caps *esw_caps = &dmn->info.caps.esw_caps;
168 struct mlx5dr_cmd_vport_cap *wire_vport;
172 ret = dr_domain_query_esw_mngr(dmn);
176 /* Query vports (except wire vport) */
177 for (vport = 0; vport < dmn->info.caps.num_esw_ports - 1; vport++) {
178 ret = dr_domain_query_vport(dmn,
180 &dmn->info.caps.vports_caps[vport]);
185 /* Last vport is the wire port */
186 wire_vport = &dmn->info.caps.vports_caps[vport];
187 wire_vport->num = MLX5_VPORT_UPLINK;
188 wire_vport->icm_address_rx = esw_caps->uplink_icm_address_rx;
189 wire_vport->icm_address_tx = esw_caps->uplink_icm_address_tx;
/* Wire port has no GVMI of its own; use the local vhca GVMI. */
190 wire_vport->vport_gvmi = 0;
191 wire_vport->vhca_gvmi = dmn->info.caps.gvmi;
/* Query FDB-related capabilities.  No-op unless this function/device is
 * the eswitch manager.  On success it copies the eswitch sw_owner flags
 * and drop ICM addresses into the domain caps, allocates the per-vport
 * caps array (num_esw_ports entries) and fills it via
 * dr_domain_query_vports().  The array is freed on the error path and
 * otherwise owned by the domain (freed in dr_domain_caps_uninit()).
 */
196 static int dr_domain_query_fdb_caps(struct mlx5_core_dev *mdev,
197 struct mlx5dr_domain *dmn)
201 if (!dmn->info.caps.eswitch_manager)
204 ret = mlx5dr_cmd_query_esw_caps(mdev, &dmn->info.caps.esw_caps);
/* Mirror eswitch caps into the flat domain caps for easy access. */
208 dmn->info.caps.fdb_sw_owner = dmn->info.caps.esw_caps.sw_owner;
209 dmn->info.caps.fdb_sw_owner_v2 = dmn->info.caps.esw_caps.sw_owner_v2;
210 dmn->info.caps.esw_rx_drop_address = dmn->info.caps.esw_caps.drop_icm_address_rx;
211 dmn->info.caps.esw_tx_drop_address = dmn->info.caps.esw_caps.drop_icm_address_tx;
213 dmn->info.caps.vports_caps = kcalloc(dmn->info.caps.num_esw_ports,
214 sizeof(dmn->info.caps.vports_caps[0]),
216 if (!dmn->info.caps.vports_caps)
219 ret = dr_domain_query_vports(dmn);
221 mlx5dr_err(dmn, "Failed to query vports caps (err: %d)", ret);
222 goto free_vports_caps;
/* All slots but the trailing wire port count as regular vports. */
225 dmn->info.caps.num_vports = dmn->info.caps.num_esw_ports - 1;
230 kfree(dmn->info.caps.vports_caps);
231 dmn->info.caps.vports_caps = NULL;
/* Query device/eswitch capabilities and configure the domain's per-type
 * (RX/TX/FDB) info: NIC type, default and drop ICM addresses, and the
 * supp_sw_steering flag.  Only Ethernet link type is supported.  A
 * domain type that lacks SW-steering support (per
 * DR_DOMAIN_SW_STEERING_SUPPORTED) or, for FDB, a device that is not
 * the eswitch manager, is rejected (error-return lines are elided in
 * this chunk).
 */
235 static int dr_domain_caps_init(struct mlx5_core_dev *mdev,
236 struct mlx5dr_domain *dmn)
238 struct mlx5dr_cmd_vport_cap *vport_cap;
241 if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) {
242 mlx5dr_err(dmn, "Failed to allocate domain, bad link type\n");
246 dmn->info.caps.num_esw_ports = mlx5_eswitch_get_total_vports(mdev);
248 ret = mlx5dr_cmd_query_device(mdev, &dmn->info.caps);
252 ret = dr_domain_query_fdb_caps(mdev, dmn);
/* Per-domain-type setup (switch on dmn->type; header line elided). */
257 case MLX5DR_DOMAIN_TYPE_NIC_RX:
258 if (!DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, rx))
/* RX default == drop: unmatched packets are dropped. */
261 dmn->info.supp_sw_steering = true;
262 dmn->info.rx.type = DR_DOMAIN_NIC_TYPE_RX;
263 dmn->info.rx.default_icm_addr = dmn->info.caps.nic_rx_drop_address;
264 dmn->info.rx.drop_icm_addr = dmn->info.caps.nic_rx_drop_address;
266 case MLX5DR_DOMAIN_TYPE_NIC_TX:
267 if (!DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, tx))
/* TX default is "allow" (pass through), drop is separate. */
270 dmn->info.supp_sw_steering = true;
271 dmn->info.tx.type = DR_DOMAIN_NIC_TYPE_TX;
272 dmn->info.tx.default_icm_addr = dmn->info.caps.nic_tx_allow_address;
273 dmn->info.tx.drop_icm_addr = dmn->info.caps.nic_tx_drop_address;
275 case MLX5DR_DOMAIN_TYPE_FDB:
276 if (!dmn->info.caps.eswitch_manager)
279 if (!DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, fdb))
/* FDB uses the eswitch-manager vport's ICM addresses as defaults. */
282 dmn->info.rx.type = DR_DOMAIN_NIC_TYPE_RX;
283 dmn->info.tx.type = DR_DOMAIN_NIC_TYPE_TX;
284 vport_cap = &dmn->info.caps.esw_manager_vport_caps;
286 dmn->info.supp_sw_steering = true;
287 dmn->info.tx.default_icm_addr = vport_cap->icm_address_tx;
288 dmn->info.rx.default_icm_addr = vport_cap->icm_address_rx;
289 dmn->info.rx.drop_icm_addr = dmn->info.caps.esw_rx_drop_address;
290 dmn->info.tx.drop_icm_addr = dmn->info.caps.esw_tx_drop_address;
291 mlx5dr_err(dmn, "Invalid domain\n");
/* Free the per-vport caps array allocated by dr_domain_query_fdb_caps()
 * (kfree(NULL) is a no-op for non-FDB domains).
 */
301 static void dr_domain_caps_uninit(struct mlx5dr_domain *dmn)
303 kfree(dmn->info.caps.vports_caps);
/* Allocate and fully initialize a SW-steering domain of @type over
 * @mdev: zero-allocate the domain, init its refcount and RX/TX mutexes,
 * query caps, size the ICM pools, init resources and the checksum
 * recalc FT cache.  Returns the domain, or NULL on failure (the elided
 * error labels unwind via dr_domain_caps_uninit() and kfree).
 */
306 struct mlx5dr_domain *
307 mlx5dr_domain_create(struct mlx5_core_dev *mdev, enum mlx5dr_domain_type type)
309 struct mlx5dr_domain *dmn;
/* Reject any type beyond the known NIC_RX/NIC_TX/FDB range. */
312 if (type > MLX5DR_DOMAIN_TYPE_FDB)
315 dmn = kzalloc(sizeof(*dmn), GFP_KERNEL);
321 refcount_set(&dmn->refcount, 1);
322 mutex_init(&dmn->info.rx.mutex);
323 mutex_init(&dmn->info.tx.mutex);
325 if (dr_domain_caps_init(mdev, dmn)) {
326 mlx5dr_err(dmn, "Failed init domain, no caps\n");
/* SW ICM chunk size is capped by what the device reports. */
330 dmn->info.max_log_action_icm_sz = DR_CHUNK_SIZE_4K;
331 dmn->info.max_log_sw_icm_sz = min_t(u32, DR_CHUNK_SIZE_1024K,
332 dmn->info.caps.log_icm_size);
334 if (!dmn->info.supp_sw_steering) {
335 mlx5dr_err(dmn, "SW steering is not supported\n");
339 /* Allocate resources */
340 ret = dr_domain_init_resources(dmn);
342 mlx5dr_err(dmn, "Failed init domain resources\n");
346 dr_domain_init_csum_recalc_fts(dmn);
/* Error unwind (labels elided): caps_uninit then free the domain. */
351 dr_domain_caps_uninit(dmn);
357 /* Assure synchronization of the device steering tables with updates made by SW
 * insertion.  SW flush drains the send ring under the domain lock; HW
 * flush issues a device steering-sync command.
 */
360 int mlx5dr_domain_sync(struct mlx5dr_domain *dmn, u32 flags)
364 if (flags & MLX5DR_DOMAIN_SYNC_FLAGS_SW) {
365 mlx5dr_domain_lock(dmn);
366 ret = mlx5dr_send_ring_force_drain(dmn);
367 mlx5dr_domain_unlock(dmn);
369 mlx5dr_err(dmn, "Force drain failed flags: %d, ret: %d\n",
/* HW sync may run in addition to (after) the SW drain. */
375 if (flags & MLX5DR_DOMAIN_SYNC_FLAGS_HW)
376 ret = mlx5dr_cmd_sync_steering(dmn->mdev);
/* Tear down a domain created by mlx5dr_domain_create().  Refuses if the
 * domain is still referenced (refcount > 1; the elided line presumably
 * returns -EBUSY -- confirm against full source).  HW is synced first
 * so no in-flight steering uses the resources being freed, then state
 * is released in reverse order of creation.
 */
381 int mlx5dr_domain_destroy(struct mlx5dr_domain *dmn)
383 if (refcount_read(&dmn->refcount) > 1)
386 /* make sure resources are not used by the hardware */
387 mlx5dr_cmd_sync_steering(dmn->mdev);
388 dr_domain_uninit_csum_recalc_fts(dmn);
389 dr_domain_uninit_resources(dmn);
390 dr_domain_caps_uninit(dmn);
391 mutex_destroy(&dmn->info.tx.mutex);
392 mutex_destroy(&dmn->info.rx.mutex);
397 void mlx5dr_domain_set_peer(struct mlx5dr_domain *dmn,
398 struct mlx5dr_domain *peer_dmn)
400 mlx5dr_domain_lock(dmn);
403 refcount_dec(&dmn->peer_dmn->refcount);
405 dmn->peer_dmn = peer_dmn;
408 refcount_inc(&dmn->peer_dmn->refcount);
410 mlx5dr_domain_unlock(dmn);