} else {
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH);
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
- err = esw_offloads_init(esw, nvfs + MLX5_SPECIAL_VPORTS);
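+		/* Reps are created only for VFs; the steering tables must
+		 * also cover the special non-VF vports, hence two sizes.
+		 */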
+ err = esw_offloads_init(esw, nvfs,
+ nvfs + MLX5_SPECIAL_VPORTS);
}
	if (err)
		goto abort;

void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw)
{
struct esw_mc_addr *mc_promisc;
int old_mode;
- int nvports;
int i;
	if (!ESW_ALLOWED(esw) || esw->mode == SRIOV_NONE)
		return;

	esw_info(esw->dev, "disable SRIOV: active vports(%d) mode(%d)\n",
		 esw->enabled_vports, esw->mode);
mc_promisc = &esw->mc_promisc;
- nvports = esw->enabled_vports;
	if (esw->mode == SRIOV_LEGACY)
		mlx5_eq_notifier_unregister(esw->dev, &esw->nb);

	for (i = 0; i < esw->total_vports; i++)
		esw_disable_vport(esw, i);

	if (mc_promisc && mc_promisc->uplink_rule)
		mlx5_del_flow_rules(mc_promisc->uplink_rule);

	if (esw->mode == SRIOV_LEGACY)
		esw_destroy_legacy_fdb_table(esw);
else if (esw->mode == SRIOV_OFFLOADS)
- esw_offloads_cleanup(esw, nvports);
+ esw_offloads_cleanup(esw);
old_mode = esw->mode;
esw->mode = SRIOV_NONE;
#define fdb_prio_table(esw, chain, prio, level) \
(esw)->fdb_table.offloads.fdb_prio[(chain)][(prio)][(level)]
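+/* The uplink rep sits at index 0 of the vport_reps array; VF reps
+ * occupy indices starting at MLX5_VPORT_FIRST_VF.
+ */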
+#define UPLINK_REP_INDEX 0
+
static struct mlx5_flow_table *
esw_get_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level);
static void
esw_put_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level);
return 0;
}
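+/* Invoke the rep's unload callback only if this rep_type was registered. */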
+static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw,
+ struct mlx5_eswitch_rep *rep, u8 rep_type)
+{
+ if (!rep->rep_if[rep_type].valid)
+ return;
+
+ rep->rep_if[rep_type].unload(rep);
+}
+
static void esw_offloads_unload_reps_type(struct mlx5_eswitch *esw, int nvports,
u8 rep_type)
{
struct mlx5_eswitch_rep *rep;
int vport;
- for (vport = nvports - 1; vport >= 0; vport--) {
+ for (vport = nvports; vport >= MLX5_VPORT_FIRST_VF; vport--) {
rep = &esw->offloads.vport_reps[vport];
- if (!rep->rep_if[rep_type].valid)
- continue;
-
- rep->rep_if[rep_type].unload(rep);
+ __esw_offloads_unload_rep(esw, rep, rep_type);
}
+
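+	/* The uplink rep is unloaded last, mirroring the load order. */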
+ rep = &esw->offloads.vport_reps[UPLINK_REP_INDEX];
+ __esw_offloads_unload_rep(esw, rep, rep_type);
}
static void esw_offloads_unload_reps(struct mlx5_eswitch *esw, int nvports)
{
	u8 rep_type = NUM_REP_TYPES;

	while (rep_type-- > 0)
		esw_offloads_unload_reps_type(esw, nvports, rep_type);
}
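+/* Load a rep via its registered callback; an unregistered rep_type is
+ * treated as success.
+ */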
+static int __esw_offloads_load_rep(struct mlx5_eswitch *esw,
+ struct mlx5_eswitch_rep *rep, u8 rep_type)
+{
+ if (!rep->rep_if[rep_type].valid)
+ return 0;
+
+ return rep->rep_if[rep_type].load(esw->dev, rep);
+}
+
static int esw_offloads_load_reps_type(struct mlx5_eswitch *esw, int nvports,
u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;
	int vport;
int err;
- for (vport = 0; vport < nvports; vport++) {
- rep = &esw->offloads.vport_reps[vport];
- if (!rep->rep_if[rep_type].valid)
- continue;
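+	/* Load the uplink rep first, before any VF reps. */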
+ rep = &esw->offloads.vport_reps[UPLINK_REP_INDEX];
+ err = __esw_offloads_load_rep(esw, rep, rep_type);
+ if (err)
+ goto out;
- err = rep->rep_if[rep_type].load(esw->dev, rep);
+ for (vport = MLX5_VPORT_FIRST_VF; vport <= nvports; vport++) {
+ rep = &esw->offloads.vport_reps[vport];
+ err = __esw_offloads_load_rep(esw, rep, rep_type);
if (err)
goto err_reps;
	}

	return 0;

err_reps:
-	esw_offloads_unload_reps_type(esw, vport, rep_type);
+	esw_offloads_unload_reps_type(esw, --vport, rep_type);
+out:
return err;
}
esw_destroy_offloads_fdb_tables(esw);
}
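+/* vf_nvports covers the reps to load; total_nvports (VFs plus the
+ * special vports) sizes the offloads steering objects.
+ */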
-int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
+int esw_offloads_init(struct mlx5_eswitch *esw, int vf_nvports,
+ int total_nvports)
{
int err;
mutex_init(&esw->fdb_table.offloads.fdb_prio_lock);
- err = esw_offloads_steering_init(esw, nvports);
+ err = esw_offloads_steering_init(esw, total_nvports);
if (err)
return err;
- err = esw_offloads_load_reps(esw, nvports);
+ err = esw_offloads_load_reps(esw, vf_nvports);
if (err)
goto err_reps;
	esw_offloads_devcom_init(esw);

	return 0;

err_reps:
	esw_offloads_steering_cleanup(esw);
	return err;
}
-void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports)
+void esw_offloads_cleanup(struct mlx5_eswitch *esw)
{
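+	/* The VF count now comes from core SR-IOV state rather than a
+	 * caller-supplied argument.
+	 */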
+ u16 num_vfs = esw->dev->priv.sriov.num_vfs;
+
esw_offloads_devcom_cleanup(esw);
- esw_offloads_unload_reps(esw, nvports);
+ esw_offloads_unload_reps(esw, num_vfs);
esw_offloads_steering_cleanup(esw);
}
void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type)
{
-#define UPLINK_REP_INDEX 0
struct mlx5_esw_offload *offloads = &esw->offloads;
struct mlx5_eswitch_rep *rep;