net/mlx5: Bridge, match FDB entry vlan tag
linux-2.6-microblaze.git: drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2021 Mellanox Technologies. */
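
/*
 * Bridge offload for the mlx5 eswitch: FDB entries of a Linux bridge that
 * spans eswitch representors are mirrored into hardware as a shared ingress
 * flow table (source MAC, optional VLAN tag, source vport metadata) that
 * forwards into a per-bridge egress table (destination MAC, optional VLAN
 * tag) which steers packets to the destination vport.
 *
 * Illustrative userspace setup that exercises this path (interface names are
 * examples only; the eswitch is assumed to be in switchdev mode):
 *
 *   ip link add name br0 type bridge vlan_filtering 1
 *   ip link set dev enp8s0f0_0 master br0
 *   bridge vlan add dev enp8s0f0_0 vid 2
 *
 * FDB entries learned by the bridge, or added by the user with
 * 'bridge fdb add', are then offloaded through the switchdev handlers below.
 */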

#include <linux/netdevice.h>
#include <linux/list.h>
#include <linux/rhashtable.h>
#include <linux/xarray.h>
#include <linux/if_bridge.h>
#include <net/switchdev.h>
#include "bridge.h"
#include "eswitch.h"
#include "fs_core.h"

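/*
 * Each bridge FDB flow table is split into two flow groups: the first half
 * holds entries that also match the VLAN tag, the second half holds
 * MAC-only entries.
 */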
#define MLX5_ESW_BRIDGE_INGRESS_TABLE_SIZE 64000
#define MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_IDX_FROM 0
#define MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_IDX_TO (MLX5_ESW_BRIDGE_INGRESS_TABLE_SIZE / 2 - 1)
#define MLX5_ESW_BRIDGE_INGRESS_TABLE_MAC_GRP_IDX_FROM \
        (MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_IDX_TO + 1)
#define MLX5_ESW_BRIDGE_INGRESS_TABLE_MAC_GRP_IDX_TO (MLX5_ESW_BRIDGE_INGRESS_TABLE_SIZE - 1)

#define MLX5_ESW_BRIDGE_EGRESS_TABLE_SIZE 64000
#define MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_FROM 0
#define MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_TO (MLX5_ESW_BRIDGE_EGRESS_TABLE_SIZE / 2 - 1)
#define MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_FROM \
        (MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_TO + 1)
#define MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_TO (MLX5_ESW_BRIDGE_EGRESS_TABLE_SIZE - 1)

enum {
        MLX5_ESW_BRIDGE_LEVEL_INGRESS_TABLE,
        MLX5_ESW_BRIDGE_LEVEL_EGRESS_TABLE,
};

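/*
 * Offloaded FDB entries are keyed by MAC address plus VLAN id, mirroring
 * the per-VLAN FDB semantics of the kernel bridge.
 */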
struct mlx5_esw_bridge_fdb_key {
        unsigned char addr[ETH_ALEN];
        u16 vid;
};

enum {
        MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER = BIT(0),
};

struct mlx5_esw_bridge_fdb_entry {
        struct mlx5_esw_bridge_fdb_key key;
        struct rhash_head ht_node;
        struct net_device *dev;
        struct list_head list;
        u16 vport_num;
        u16 flags;

        struct mlx5_flow_handle *ingress_handle;
        struct mlx5_fc *ingress_counter;
        unsigned long lastuse;
        struct mlx5_flow_handle *egress_handle;
};

static const struct rhashtable_params fdb_ht_params = {
        .key_offset = offsetof(struct mlx5_esw_bridge_fdb_entry, key),
        .key_len = sizeof(struct mlx5_esw_bridge_fdb_key),
        .head_offset = offsetof(struct mlx5_esw_bridge_fdb_entry, ht_node),
        .automatic_shrinking = true,
};

struct mlx5_esw_bridge_vlan {
        u16 vid;
        u16 flags;
};

struct mlx5_esw_bridge_port {
        u16 vport_num;
        struct xarray vlans;
};

enum {
        MLX5_ESW_BRIDGE_VLAN_FILTERING_FLAG = BIT(0),
};

struct mlx5_esw_bridge {
        int ifindex;
        int refcnt;
        struct list_head list;
        struct mlx5_esw_bridge_offloads *br_offloads;

        struct list_head fdb_list;
        struct rhashtable fdb_ht;
        struct xarray vports;

        struct mlx5_flow_table *egress_ft;
        struct mlx5_flow_group *egress_vlan_fg;
        struct mlx5_flow_group *egress_mac_fg;
        unsigned long ageing_time;
        u32 flags;
};

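/*
 * Notify the software bridge about offloaded entries so it can mark them as
 * offloaded (SWITCHDEV_FDB_OFFLOADED) or take over/release their aging
 * (SWITCHDEV_FDB_ADD_TO_BRIDGE / SWITCHDEV_FDB_DEL_TO_BRIDGE).
 */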
static void
mlx5_esw_bridge_fdb_offload_notify(struct net_device *dev, const unsigned char *addr, u16 vid,
                                   unsigned long val)
{
        struct switchdev_notifier_fdb_info send_info;

        send_info.addr = addr;
        send_info.vid = vid;
        send_info.offloaded = true;
        call_switchdev_notifiers(val, dev, &send_info.info, NULL);
}

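/* Create a bridge FDB flow table at the given level in the FDB_BR_OFFLOAD priority. */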
static struct mlx5_flow_table *
mlx5_esw_bridge_table_create(int max_fte, u32 level, struct mlx5_eswitch *esw)
{
        struct mlx5_flow_table_attr ft_attr = {};
        struct mlx5_core_dev *dev = esw->dev;
        struct mlx5_flow_namespace *ns;
        struct mlx5_flow_table *fdb;

        ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
        if (!ns) {
                esw_warn(dev, "Failed to get FDB namespace\n");
                return ERR_PTR(-ENOENT);
        }

        ft_attr.max_fte = max_fte;
        ft_attr.level = level;
        ft_attr.prio = FDB_BR_OFFLOAD;
        fdb = mlx5_create_flow_table(ns, &ft_attr);
        if (IS_ERR(fdb))
                esw_warn(dev, "Failed to create bridge FDB Table (err=%ld)\n", PTR_ERR(fdb));

        return fdb;
}

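/*
 * The ingress VLAN group matches on source MAC, customer VLAN tag presence,
 * first VLAN id and the source vport metadata in metadata_reg_c_0; the MAC
 * group created below matches on source MAC and vport metadata only.
 */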
static struct mlx5_flow_group *
mlx5_esw_bridge_ingress_vlan_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *ingress_ft)
{
        int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
        struct mlx5_flow_group *fg;
        u32 *in, *match;

        in = kvzalloc(inlen, GFP_KERNEL);
        if (!in)
                return ERR_PTR(-ENOMEM);

        MLX5_SET(create_flow_group_in, in, match_criteria_enable,
                 MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS_2);
        match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);

        MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.smac_47_16);
        MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.smac_15_0);
        MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.cvlan_tag);
        MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.first_vid);

        MLX5_SET(fte_match_param, match, misc_parameters_2.metadata_reg_c_0,
                 mlx5_eswitch_get_vport_metadata_mask());

        MLX5_SET(create_flow_group_in, in, start_flow_index,
                 MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_IDX_FROM);
        MLX5_SET(create_flow_group_in, in, end_flow_index,
                 MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_IDX_TO);

        fg = mlx5_create_flow_group(ingress_ft, in);
        kvfree(in);
        if (IS_ERR(fg))
                esw_warn(esw->dev,
                         "Failed to create VLAN flow group for bridge ingress table (err=%ld)\n",
                         PTR_ERR(fg));

        return fg;
}

static struct mlx5_flow_group *
mlx5_esw_bridge_ingress_mac_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *ingress_ft)
{
        int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
        struct mlx5_flow_group *fg;
        u32 *in, *match;

        in = kvzalloc(inlen, GFP_KERNEL);
        if (!in)
                return ERR_PTR(-ENOMEM);

        MLX5_SET(create_flow_group_in, in, match_criteria_enable,
                 MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS_2);
        match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);

        MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.smac_47_16);
        MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.smac_15_0);

        MLX5_SET(fte_match_param, match, misc_parameters_2.metadata_reg_c_0,
                 mlx5_eswitch_get_vport_metadata_mask());

        MLX5_SET(create_flow_group_in, in, start_flow_index,
                 MLX5_ESW_BRIDGE_INGRESS_TABLE_MAC_GRP_IDX_FROM);
        MLX5_SET(create_flow_group_in, in, end_flow_index,
                 MLX5_ESW_BRIDGE_INGRESS_TABLE_MAC_GRP_IDX_TO);

        fg = mlx5_create_flow_group(ingress_ft, in);
        if (IS_ERR(fg))
                esw_warn(esw->dev,
                         "Failed to create MAC flow group for bridge ingress table (err=%ld)\n",
                         PTR_ERR(fg));

        kvfree(in);
        return fg;
}

static struct mlx5_flow_group *
mlx5_esw_bridge_egress_vlan_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *egress_ft)
{
        int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
        struct mlx5_flow_group *fg;
        u32 *in, *match;

        in = kvzalloc(inlen, GFP_KERNEL);
        if (!in)
                return ERR_PTR(-ENOMEM);

        MLX5_SET(create_flow_group_in, in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
        match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);

        MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.dmac_47_16);
        MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.dmac_15_0);
        MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.cvlan_tag);
        MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.first_vid);

        MLX5_SET(create_flow_group_in, in, start_flow_index,
                 MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_FROM);
        MLX5_SET(create_flow_group_in, in, end_flow_index,
                 MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_TO);

        fg = mlx5_create_flow_group(egress_ft, in);
        if (IS_ERR(fg))
                esw_warn(esw->dev,
                         "Failed to create VLAN flow group for bridge egress table (err=%ld)\n",
                         PTR_ERR(fg));
        kvfree(in);
        return fg;
}

static struct mlx5_flow_group *
mlx5_esw_bridge_egress_mac_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *egress_ft)
{
        int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
        struct mlx5_flow_group *fg;
        u32 *in, *match;

        in = kvzalloc(inlen, GFP_KERNEL);
        if (!in)
                return ERR_PTR(-ENOMEM);

        MLX5_SET(create_flow_group_in, in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
        match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);

        MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.dmac_47_16);
        MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.dmac_15_0);

        MLX5_SET(create_flow_group_in, in, start_flow_index,
                 MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_FROM);
        MLX5_SET(create_flow_group_in, in, end_flow_index,
                 MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_TO);

        fg = mlx5_create_flow_group(egress_ft, in);
        if (IS_ERR(fg))
                esw_warn(esw->dev,
                         "Failed to create bridge egress table MAC flow group (err=%ld)\n",
                         PTR_ERR(fg));
        kvfree(in);
        return fg;
}

static int
mlx5_esw_bridge_ingress_table_init(struct mlx5_esw_bridge_offloads *br_offloads)
{
        struct mlx5_flow_group *mac_fg, *vlan_fg;
        struct mlx5_flow_table *ingress_ft;
        int err;

        if (!mlx5_eswitch_vport_match_metadata_enabled(br_offloads->esw))
                return -EOPNOTSUPP;

        ingress_ft = mlx5_esw_bridge_table_create(MLX5_ESW_BRIDGE_INGRESS_TABLE_SIZE,
                                                  MLX5_ESW_BRIDGE_LEVEL_INGRESS_TABLE,
                                                  br_offloads->esw);
        if (IS_ERR(ingress_ft))
                return PTR_ERR(ingress_ft);

        vlan_fg = mlx5_esw_bridge_ingress_vlan_fg_create(br_offloads->esw, ingress_ft);
        if (IS_ERR(vlan_fg)) {
                err = PTR_ERR(vlan_fg);
                goto err_vlan_fg;
        }

        mac_fg = mlx5_esw_bridge_ingress_mac_fg_create(br_offloads->esw, ingress_ft);
        if (IS_ERR(mac_fg)) {
                err = PTR_ERR(mac_fg);
                goto err_mac_fg;
        }

        br_offloads->ingress_ft = ingress_ft;
        br_offloads->ingress_vlan_fg = vlan_fg;
        br_offloads->ingress_mac_fg = mac_fg;
        return 0;

err_mac_fg:
        mlx5_destroy_flow_group(vlan_fg);
err_vlan_fg:
        mlx5_destroy_flow_table(ingress_ft);
        return err;
}

static void
mlx5_esw_bridge_ingress_table_cleanup(struct mlx5_esw_bridge_offloads *br_offloads)
{
        mlx5_destroy_flow_group(br_offloads->ingress_mac_fg);
        br_offloads->ingress_mac_fg = NULL;
        mlx5_destroy_flow_group(br_offloads->ingress_vlan_fg);
        br_offloads->ingress_vlan_fg = NULL;
        mlx5_destroy_flow_table(br_offloads->ingress_ft);
        br_offloads->ingress_ft = NULL;
}

static int
mlx5_esw_bridge_egress_table_init(struct mlx5_esw_bridge_offloads *br_offloads,
                                  struct mlx5_esw_bridge *bridge)
{
        struct mlx5_flow_group *mac_fg, *vlan_fg;
        struct mlx5_flow_table *egress_ft;
        int err;

        egress_ft = mlx5_esw_bridge_table_create(MLX5_ESW_BRIDGE_EGRESS_TABLE_SIZE,
                                                 MLX5_ESW_BRIDGE_LEVEL_EGRESS_TABLE,
                                                 br_offloads->esw);
        if (IS_ERR(egress_ft))
                return PTR_ERR(egress_ft);

        vlan_fg = mlx5_esw_bridge_egress_vlan_fg_create(br_offloads->esw, egress_ft);
        if (IS_ERR(vlan_fg)) {
                err = PTR_ERR(vlan_fg);
                goto err_vlan_fg;
        }

        mac_fg = mlx5_esw_bridge_egress_mac_fg_create(br_offloads->esw, egress_ft);
        if (IS_ERR(mac_fg)) {
                err = PTR_ERR(mac_fg);
                goto err_mac_fg;
        }

        bridge->egress_ft = egress_ft;
        bridge->egress_vlan_fg = vlan_fg;
        bridge->egress_mac_fg = mac_fg;
        return 0;

err_mac_fg:
        mlx5_destroy_flow_group(vlan_fg);
err_vlan_fg:
        mlx5_destroy_flow_table(egress_ft);
        return err;
}

static void
mlx5_esw_bridge_egress_table_cleanup(struct mlx5_esw_bridge *bridge)
{
        mlx5_destroy_flow_group(bridge->egress_mac_fg);
        mlx5_destroy_flow_group(bridge->egress_vlan_fg);
        mlx5_destroy_flow_table(bridge->egress_ft);
}

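/*
 * Ingress rule for an FDB entry: match the source vport metadata, the source
 * MAC and, when the entry belongs to a VLAN, the VLAN tag; count packets for
 * aging and forward to the per-bridge egress table.
 */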
static struct mlx5_flow_handle *
mlx5_esw_bridge_ingress_flow_create(u16 vport_num, const unsigned char *addr,
                                    struct mlx5_esw_bridge_vlan *vlan, u32 counter_id,
                                    struct mlx5_esw_bridge *bridge)
{
        struct mlx5_esw_bridge_offloads *br_offloads = bridge->br_offloads;
        struct mlx5_flow_act flow_act = {
                .action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_COUNT,
                .flags = FLOW_ACT_NO_APPEND,
        };
        struct mlx5_flow_destination dests[2] = {};
        struct mlx5_flow_spec *rule_spec;
        struct mlx5_flow_handle *handle;
        u8 *smac_v, *smac_c;

        rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL);
        if (!rule_spec)
                return ERR_PTR(-ENOMEM);

        rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS_2;

        smac_v = MLX5_ADDR_OF(fte_match_param, rule_spec->match_value,
                              outer_headers.smac_47_16);
        ether_addr_copy(smac_v, addr);
        smac_c = MLX5_ADDR_OF(fte_match_param, rule_spec->match_criteria,
                              outer_headers.smac_47_16);
        eth_broadcast_addr(smac_c);

        MLX5_SET(fte_match_param, rule_spec->match_criteria,
                 misc_parameters_2.metadata_reg_c_0, mlx5_eswitch_get_vport_metadata_mask());
        MLX5_SET(fte_match_param, rule_spec->match_value, misc_parameters_2.metadata_reg_c_0,
                 mlx5_eswitch_get_vport_metadata_for_match(br_offloads->esw, vport_num));

        if (vlan) {
                MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
                                 outer_headers.cvlan_tag);
                MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
                                 outer_headers.cvlan_tag);
                MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
                                 outer_headers.first_vid);
                MLX5_SET(fte_match_param, rule_spec->match_value, outer_headers.first_vid,
                         vlan->vid);
        }

        dests[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
        dests[0].ft = bridge->egress_ft;
        dests[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
        dests[1].counter_id = counter_id;

        handle = mlx5_add_flow_rules(br_offloads->ingress_ft, rule_spec, &flow_act, dests,
                                     ARRAY_SIZE(dests));

        kvfree(rule_spec);
        return handle;
}

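/*
 * Egress rule for an FDB entry: match the destination MAC (and VLAN tag, if
 * any) and forward directly to the destination vport.
 */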
static struct mlx5_flow_handle *
mlx5_esw_bridge_egress_flow_create(u16 vport_num, const unsigned char *addr,
                                   struct mlx5_esw_bridge_vlan *vlan,
                                   struct mlx5_esw_bridge *bridge)
{
        struct mlx5_flow_destination dest = {
                .type = MLX5_FLOW_DESTINATION_TYPE_VPORT,
                .vport.num = vport_num,
        };
        struct mlx5_flow_act flow_act = {
                .action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
                .flags = FLOW_ACT_NO_APPEND,
        };
        struct mlx5_flow_spec *rule_spec;
        struct mlx5_flow_handle *handle;
        u8 *dmac_v, *dmac_c;

        rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL);
        if (!rule_spec)
                return ERR_PTR(-ENOMEM);

        rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;

        dmac_v = MLX5_ADDR_OF(fte_match_param, rule_spec->match_value,
                              outer_headers.dmac_47_16);
        ether_addr_copy(dmac_v, addr);
        dmac_c = MLX5_ADDR_OF(fte_match_param, rule_spec->match_criteria,
                              outer_headers.dmac_47_16);
        eth_broadcast_addr(dmac_c);

        if (vlan) {
                MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
                                 outer_headers.cvlan_tag);
                MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
                                 outer_headers.cvlan_tag);
                MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
                                 outer_headers.first_vid);
                MLX5_SET(fte_match_param, rule_spec->match_value, outer_headers.first_vid,
                         vlan->vid);
        }

        handle = mlx5_add_flow_rules(bridge->egress_ft, rule_spec, &flow_act, &dest, 1);

        kvfree(rule_spec);
        return handle;
}

static struct mlx5_esw_bridge *mlx5_esw_bridge_create(int ifindex,
                                                      struct mlx5_esw_bridge_offloads *br_offloads)
{
        struct mlx5_esw_bridge *bridge;
        int err;

        bridge = kvzalloc(sizeof(*bridge), GFP_KERNEL);
        if (!bridge)
                return ERR_PTR(-ENOMEM);

        bridge->br_offloads = br_offloads;
        err = mlx5_esw_bridge_egress_table_init(br_offloads, bridge);
        if (err)
                goto err_egress_tbl;

        err = rhashtable_init(&bridge->fdb_ht, &fdb_ht_params);
        if (err)
                goto err_fdb_ht;

        INIT_LIST_HEAD(&bridge->fdb_list);
        xa_init(&bridge->vports);
        bridge->ifindex = ifindex;
        bridge->refcnt = 1;
        bridge->ageing_time = BR_DEFAULT_AGEING_TIME;
        list_add(&bridge->list, &br_offloads->bridges);

        return bridge;

err_fdb_ht:
        mlx5_esw_bridge_egress_table_cleanup(bridge);
err_egress_tbl:
        kvfree(bridge);
        return ERR_PTR(err);
}

static void mlx5_esw_bridge_get(struct mlx5_esw_bridge *bridge)
{
        bridge->refcnt++;
}

static void mlx5_esw_bridge_put(struct mlx5_esw_bridge_offloads *br_offloads,
                                struct mlx5_esw_bridge *bridge)
{
        if (--bridge->refcnt)
                return;

        mlx5_esw_bridge_egress_table_cleanup(bridge);
        WARN_ON(!xa_empty(&bridge->vports));
        list_del(&bridge->list);
        rhashtable_destroy(&bridge->fdb_ht);
        kvfree(bridge);

        if (list_empty(&br_offloads->bridges))
                mlx5_esw_bridge_ingress_table_cleanup(br_offloads);
}

static struct mlx5_esw_bridge *
mlx5_esw_bridge_lookup(int ifindex, struct mlx5_esw_bridge_offloads *br_offloads)
{
        struct mlx5_esw_bridge *bridge;

        ASSERT_RTNL();

        list_for_each_entry(bridge, &br_offloads->bridges, list) {
                if (bridge->ifindex == ifindex) {
                        mlx5_esw_bridge_get(bridge);
                        return bridge;
                }
        }

        if (!br_offloads->ingress_ft) {
                int err = mlx5_esw_bridge_ingress_table_init(br_offloads);

                if (err)
                        return ERR_PTR(err);
        }

        bridge = mlx5_esw_bridge_create(ifindex, br_offloads);
        if (IS_ERR(bridge) && list_empty(&br_offloads->bridges))
                mlx5_esw_bridge_ingress_table_cleanup(br_offloads);
        return bridge;
}

static int mlx5_esw_bridge_port_insert(struct mlx5_esw_bridge_port *port,
                                       struct mlx5_esw_bridge *bridge)
{
        return xa_insert(&bridge->vports, port->vport_num, port, GFP_KERNEL);
}

static struct mlx5_esw_bridge_port *
mlx5_esw_bridge_port_lookup(u16 vport_num, struct mlx5_esw_bridge *bridge)
{
        return xa_load(&bridge->vports, vport_num);
}

static void mlx5_esw_bridge_port_erase(struct mlx5_esw_bridge_port *port,
                                       struct mlx5_esw_bridge *bridge)
{
        xa_erase(&bridge->vports, port->vport_num);
}

static void
mlx5_esw_bridge_fdb_entry_cleanup(struct mlx5_esw_bridge_fdb_entry *entry,
                                  struct mlx5_esw_bridge *bridge)
{
        rhashtable_remove_fast(&bridge->fdb_ht, &entry->ht_node, fdb_ht_params);
        mlx5_del_flow_rules(entry->egress_handle);
        mlx5_del_flow_rules(entry->ingress_handle);
        mlx5_fc_destroy(bridge->br_offloads->esw->dev, entry->ingress_counter);
        list_del(&entry->list);
        kvfree(entry);
}

static void mlx5_esw_bridge_fdb_flush(struct mlx5_esw_bridge *bridge)
{
        struct mlx5_esw_bridge_fdb_entry *entry, *tmp;

        list_for_each_entry_safe(entry, tmp, &bridge->fdb_list, list) {
                if (!(entry->flags & MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER))
                        mlx5_esw_bridge_fdb_offload_notify(entry->dev, entry->key.addr,
                                                           entry->key.vid,
                                                           SWITCHDEV_FDB_DEL_TO_BRIDGE);
                mlx5_esw_bridge_fdb_entry_cleanup(entry, bridge);
        }
}

static struct mlx5_esw_bridge_vlan *
mlx5_esw_bridge_vlan_lookup(u16 vid, struct mlx5_esw_bridge_port *port)
{
        return xa_load(&port->vlans, vid);
}

static struct mlx5_esw_bridge_vlan *
mlx5_esw_bridge_vlan_create(u16 vid, u16 flags, struct mlx5_esw_bridge_port *port)
{
        struct mlx5_esw_bridge_vlan *vlan;
        int err;

        vlan = kvzalloc(sizeof(*vlan), GFP_KERNEL);
        if (!vlan)
                return ERR_PTR(-ENOMEM);

        vlan->vid = vid;
        vlan->flags = flags;
        err = xa_insert(&port->vlans, vid, vlan, GFP_KERNEL);
        if (err) {
                kvfree(vlan);
                return ERR_PTR(err);
        }

        return vlan;
}

static void mlx5_esw_bridge_vlan_erase(struct mlx5_esw_bridge_port *port,
                                       struct mlx5_esw_bridge_vlan *vlan)
{
        xa_erase(&port->vlans, vlan->vid);
}

static void mlx5_esw_bridge_vlan_cleanup(struct mlx5_esw_bridge_port *port,
                                         struct mlx5_esw_bridge_vlan *vlan)
{
        mlx5_esw_bridge_vlan_erase(port, vlan);
        kvfree(vlan);
}

static void mlx5_esw_bridge_port_vlans_flush(struct mlx5_esw_bridge_port *port)
{
        struct mlx5_esw_bridge_vlan *vlan;
        unsigned long index;

        xa_for_each(&port->vlans, index, vlan)
                mlx5_esw_bridge_vlan_cleanup(port, vlan);
}

static struct mlx5_esw_bridge_vlan *
mlx5_esw_bridge_port_vlan_lookup(u16 vid, u16 vport_num, struct mlx5_esw_bridge *bridge,
                                 struct mlx5_eswitch *esw)
{
        struct mlx5_esw_bridge_port *port;
        struct mlx5_esw_bridge_vlan *vlan;

        port = mlx5_esw_bridge_port_lookup(vport_num, bridge);
        if (!port) {
                /* FDB is added asynchronously on wq while port might have been deleted
                 * concurrently. Report on 'info' logging level and skip the FDB offload.
                 */
                esw_info(esw->dev, "Failed to lookup bridge port (vport=%u)\n", vport_num);
                return ERR_PTR(-EINVAL);
        }

        vlan = mlx5_esw_bridge_vlan_lookup(vid, port);
        if (!vlan) {
                /* FDB is added asynchronously on wq while vlan might have been deleted
                 * concurrently. Report on 'info' logging level and skip the FDB offload.
                 */
                esw_info(esw->dev, "Failed to lookup bridge port vlan metadata (vport=%u)\n",
                         vport_num);
                return ERR_PTR(-EINVAL);
        }

        return vlan;
}

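/*
 * Create an offloaded FDB entry: resolve the port VLAN when VLAN filtering is
 * enabled (entries on a PVID/untagged VLAN are rejected since VLAN push/pop
 * is not offloaded here), allocate a flow counter for aging, install the
 * ingress and egress rules and insert the entry into the per-bridge
 * hashtable.
 */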
static struct mlx5_esw_bridge_fdb_entry *
mlx5_esw_bridge_fdb_entry_init(struct net_device *dev, u16 vport_num, const unsigned char *addr,
                               u16 vid, bool added_by_user, struct mlx5_eswitch *esw,
                               struct mlx5_esw_bridge *bridge)
{
        struct mlx5_esw_bridge_vlan *vlan = NULL;
        struct mlx5_esw_bridge_fdb_entry *entry;
        struct mlx5_flow_handle *handle;
        struct mlx5_fc *counter;
        struct mlx5e_priv *priv;
        int err;

        if (bridge->flags & MLX5_ESW_BRIDGE_VLAN_FILTERING_FLAG && vid) {
                vlan = mlx5_esw_bridge_port_vlan_lookup(vid, vport_num, bridge, esw);
                if (IS_ERR(vlan))
                        return ERR_CAST(vlan);
                if (vlan->flags & (BRIDGE_VLAN_INFO_PVID | BRIDGE_VLAN_INFO_UNTAGGED))
                        return ERR_PTR(-EOPNOTSUPP); /* can't offload vlan push/pop */
        }

        priv = netdev_priv(dev);
        entry = kvzalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry)
                return ERR_PTR(-ENOMEM);

        ether_addr_copy(entry->key.addr, addr);
        entry->key.vid = vid;
        entry->dev = dev;
        entry->vport_num = vport_num;
        entry->lastuse = jiffies;
        if (added_by_user)
                entry->flags |= MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER;

        counter = mlx5_fc_create(priv->mdev, true);
        if (IS_ERR(counter)) {
                err = PTR_ERR(counter);
                goto err_ingress_fc_create;
        }
        entry->ingress_counter = counter;

        handle = mlx5_esw_bridge_ingress_flow_create(vport_num, addr, vlan, mlx5_fc_id(counter),
                                                     bridge);
        if (IS_ERR(handle)) {
                err = PTR_ERR(handle);
                esw_warn(esw->dev, "Failed to create ingress flow(vport=%u,err=%d)\n",
                         vport_num, err);
                goto err_ingress_flow_create;
        }
        entry->ingress_handle = handle;

        handle = mlx5_esw_bridge_egress_flow_create(vport_num, addr, vlan, bridge);
        if (IS_ERR(handle)) {
                err = PTR_ERR(handle);
                esw_warn(esw->dev, "Failed to create egress flow(vport=%u,err=%d)\n",
                         vport_num, err);
                goto err_egress_flow_create;
        }
        entry->egress_handle = handle;

        err = rhashtable_insert_fast(&bridge->fdb_ht, &entry->ht_node, fdb_ht_params);
        if (err) {
                esw_warn(esw->dev, "Failed to insert FDB flow(vport=%u,err=%d)\n", vport_num, err);
                goto err_ht_init;
        }

        list_add(&entry->list, &bridge->fdb_list);
        return entry;

err_ht_init:
        mlx5_del_flow_rules(entry->egress_handle);
err_egress_flow_create:
        mlx5_del_flow_rules(entry->ingress_handle);
err_ingress_flow_create:
        mlx5_fc_destroy(priv->mdev, entry->ingress_counter);
err_ingress_fc_create:
        kvfree(entry);
        return ERR_PTR(err);
}

int mlx5_esw_bridge_ageing_time_set(unsigned long ageing_time, struct mlx5_eswitch *esw,
                                    struct mlx5_vport *vport)
{
        if (!vport->bridge)
                return -EINVAL;

        vport->bridge->ageing_time = ageing_time;
        return 0;
}

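/*
 * Toggling VLAN filtering changes how FDB entries are matched, so all
 * offloaded entries are flushed and get re-offloaded as new switchdev
 * notifications arrive.
 */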
int mlx5_esw_bridge_vlan_filtering_set(bool enable, struct mlx5_eswitch *esw,
                                       struct mlx5_vport *vport)
{
        struct mlx5_esw_bridge *bridge;
        bool filtering;

        if (!vport->bridge)
                return -EINVAL;

        bridge = vport->bridge;
        filtering = bridge->flags & MLX5_ESW_BRIDGE_VLAN_FILTERING_FLAG;
        if (filtering == enable)
                return 0;

        mlx5_esw_bridge_fdb_flush(bridge);
        if (enable)
                bridge->flags |= MLX5_ESW_BRIDGE_VLAN_FILTERING_FLAG;
        else
                bridge->flags &= ~MLX5_ESW_BRIDGE_VLAN_FILTERING_FLAG;

        return 0;
}

static int mlx5_esw_bridge_vport_init(struct mlx5_esw_bridge_offloads *br_offloads,
                                      struct mlx5_esw_bridge *bridge,
                                      struct mlx5_vport *vport)
{
        struct mlx5_eswitch *esw = br_offloads->esw;
        struct mlx5_esw_bridge_port *port;
        int err;

        port = kvzalloc(sizeof(*port), GFP_KERNEL);
        if (!port) {
                err = -ENOMEM;
                goto err_port_alloc;
        }

        port->vport_num = vport->vport;
        xa_init(&port->vlans);
        err = mlx5_esw_bridge_port_insert(port, bridge);
        if (err) {
                esw_warn(esw->dev, "Failed to insert port metadata (vport=%u,err=%d)\n",
                         vport->vport, err);
                goto err_port_insert;
        }

        vport->bridge = bridge;
        return 0;

err_port_insert:
        kvfree(port);
err_port_alloc:
        mlx5_esw_bridge_put(br_offloads, bridge);
        return err;
}

static int mlx5_esw_bridge_vport_cleanup(struct mlx5_esw_bridge_offloads *br_offloads,
                                         struct mlx5_vport *vport)
{
        struct mlx5_esw_bridge *bridge = vport->bridge;
        struct mlx5_esw_bridge_fdb_entry *entry, *tmp;
        struct mlx5_esw_bridge_port *port;

        list_for_each_entry_safe(entry, tmp, &bridge->fdb_list, list)
                if (entry->vport_num == vport->vport)
                        mlx5_esw_bridge_fdb_entry_cleanup(entry, bridge);

        port = mlx5_esw_bridge_port_lookup(vport->vport, bridge);
        if (!port) {
                WARN(1, "Vport %u metadata not found on bridge", vport->vport);
                return -EINVAL;
        }

        mlx5_esw_bridge_port_vlans_flush(port);
        mlx5_esw_bridge_port_erase(port, bridge);
        kvfree(port);
        mlx5_esw_bridge_put(br_offloads, bridge);
        vport->bridge = NULL;
        return 0;
}

int mlx5_esw_bridge_vport_link(int ifindex, struct mlx5_esw_bridge_offloads *br_offloads,
                               struct mlx5_vport *vport, struct netlink_ext_ack *extack)
{
        struct mlx5_esw_bridge *bridge;
        int err;

        WARN_ON(vport->bridge);

        bridge = mlx5_esw_bridge_lookup(ifindex, br_offloads);
        if (IS_ERR(bridge)) {
                NL_SET_ERR_MSG_MOD(extack, "Error checking for existing bridge with same ifindex");
                return PTR_ERR(bridge);
        }

        err = mlx5_esw_bridge_vport_init(br_offloads, bridge, vport);
        if (err)
                NL_SET_ERR_MSG_MOD(extack, "Error initializing port");
        return err;
}

int mlx5_esw_bridge_vport_unlink(int ifindex, struct mlx5_esw_bridge_offloads *br_offloads,
                                 struct mlx5_vport *vport, struct netlink_ext_ack *extack)
{
        struct mlx5_esw_bridge *bridge = vport->bridge;
        int err;

        if (!bridge) {
                NL_SET_ERR_MSG_MOD(extack, "Port is not attached to any bridge");
                return -EINVAL;
        }
        if (bridge->ifindex != ifindex) {
                NL_SET_ERR_MSG_MOD(extack, "Port is attached to another bridge");
                return -EINVAL;
        }

        err = mlx5_esw_bridge_vport_cleanup(br_offloads, vport);
        if (err)
                NL_SET_ERR_MSG_MOD(extack, "Port cleanup failed");
        return err;
}

int mlx5_esw_bridge_port_vlan_add(u16 vid, u16 flags, struct mlx5_eswitch *esw,
                                  struct mlx5_vport *vport, struct netlink_ext_ack *extack)
{
        struct mlx5_esw_bridge_port *port;
        struct mlx5_esw_bridge_vlan *vlan;

        port = mlx5_esw_bridge_port_lookup(vport->vport, vport->bridge);
        if (!port)
                return -EINVAL;

        vlan = mlx5_esw_bridge_vlan_lookup(vid, port);
        if (vlan) {
                vlan->flags = flags;
                return 0;
        }

        vlan = mlx5_esw_bridge_vlan_create(vid, flags, port);
        if (IS_ERR(vlan)) {
                NL_SET_ERR_MSG_MOD(extack, "Failed to create VLAN entry");
                return PTR_ERR(vlan);
        }
        return 0;
}

void mlx5_esw_bridge_port_vlan_del(u16 vid, struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
        struct mlx5_esw_bridge_port *port;
        struct mlx5_esw_bridge_vlan *vlan;

        port = mlx5_esw_bridge_port_lookup(vport->vport, vport->bridge);
        if (!port)
                return;

        vlan = mlx5_esw_bridge_vlan_lookup(vid, port);
        if (!vlan)
                return;
        mlx5_esw_bridge_vlan_cleanup(port, vlan);
}

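/*
 * Switchdev FDB add/del handlers. Callers invoke these asynchronously from a
 * workqueue in response to switchdev FDB notifications (see the 'FDB is added
 * asynchronously on wq' comments above).
 */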
void mlx5_esw_bridge_fdb_create(struct net_device *dev, struct mlx5_eswitch *esw,
                                struct mlx5_vport *vport,
                                struct switchdev_notifier_fdb_info *fdb_info)
{
        struct mlx5_esw_bridge *bridge = vport->bridge;
        struct mlx5_esw_bridge_fdb_entry *entry;
        u16 vport_num = vport->vport;

        if (!bridge) {
                esw_info(esw->dev, "Vport is not assigned to bridge (vport=%u)\n", vport_num);
                return;
        }

        entry = mlx5_esw_bridge_fdb_entry_init(dev, vport_num, fdb_info->addr, fdb_info->vid,
                                               fdb_info->added_by_user, esw, bridge);
        if (IS_ERR(entry))
                return;

        if (entry->flags & MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER)
                mlx5_esw_bridge_fdb_offload_notify(dev, entry->key.addr, entry->key.vid,
                                                   SWITCHDEV_FDB_OFFLOADED);
        else
                /* Take over dynamic entries to prevent kernel bridge from aging them out. */
                mlx5_esw_bridge_fdb_offload_notify(dev, entry->key.addr, entry->key.vid,
                                                   SWITCHDEV_FDB_ADD_TO_BRIDGE);
}

void mlx5_esw_bridge_fdb_remove(struct net_device *dev, struct mlx5_eswitch *esw,
                                struct mlx5_vport *vport,
                                struct switchdev_notifier_fdb_info *fdb_info)
{
        struct mlx5_esw_bridge *bridge = vport->bridge;
        struct mlx5_esw_bridge_fdb_entry *entry;
        struct mlx5_esw_bridge_fdb_key key;
        u16 vport_num = vport->vport;

        if (!bridge) {
                esw_warn(esw->dev, "Vport is not assigned to bridge (vport=%u)\n", vport_num);
                return;
        }

        ether_addr_copy(key.addr, fdb_info->addr);
        key.vid = fdb_info->vid;
        entry = rhashtable_lookup_fast(&bridge->fdb_ht, &key, fdb_ht_params);
        if (!entry) {
                esw_warn(esw->dev,
                         "FDB entry with specified key not found (MAC=%pM,vid=%u,vport=%u)\n",
                         key.addr, key.vid, vport_num);
                return;
        }

        if (!(entry->flags & MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER))
                mlx5_esw_bridge_fdb_offload_notify(dev, entry->key.addr, entry->key.vid,
                                                   SWITCHDEV_FDB_DEL_TO_BRIDGE);
        mlx5_esw_bridge_fdb_entry_cleanup(entry, bridge);
}

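/*
 * Periodic aging pass: compare the HW counter's last-use timestamp against
 * the entry's recorded one. Recently used entries are refreshed in the
 * software bridge; entries idle for longer than the bridge ageing time are
 * removed.
 */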
void mlx5_esw_bridge_update(struct mlx5_esw_bridge_offloads *br_offloads)
{
        struct mlx5_esw_bridge_fdb_entry *entry, *tmp;
        struct mlx5_esw_bridge *bridge;

        list_for_each_entry(bridge, &br_offloads->bridges, list) {
                list_for_each_entry_safe(entry, tmp, &bridge->fdb_list, list) {
                        unsigned long lastuse =
                                (unsigned long)mlx5_fc_query_lastuse(entry->ingress_counter);

                        if (entry->flags & MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER)
                                continue;

                        if (time_after(lastuse, entry->lastuse)) {
                                entry->lastuse = lastuse;
                                /* refresh existing bridge entry */
                                mlx5_esw_bridge_fdb_offload_notify(entry->dev, entry->key.addr,
                                                                   entry->key.vid,
                                                                   SWITCHDEV_FDB_ADD_TO_BRIDGE);
                        } else if (time_is_before_jiffies(entry->lastuse + bridge->ageing_time)) {
                                mlx5_esw_bridge_fdb_offload_notify(entry->dev, entry->key.addr,
                                                                   entry->key.vid,
                                                                   SWITCHDEV_FDB_DEL_TO_BRIDGE);
                                mlx5_esw_bridge_fdb_entry_cleanup(entry, bridge);
                        }
                }
        }
}

static void mlx5_esw_bridge_flush(struct mlx5_esw_bridge_offloads *br_offloads)
{
        struct mlx5_eswitch *esw = br_offloads->esw;
        struct mlx5_vport *vport;
        unsigned long i;

        mlx5_esw_for_each_vport(esw, i, vport)
                if (vport->bridge)
                        mlx5_esw_bridge_vport_cleanup(br_offloads, vport);

        WARN_ONCE(!list_empty(&br_offloads->bridges),
                  "Cleaning up bridge offloads while still having bridges attached\n");
}

struct mlx5_esw_bridge_offloads *mlx5_esw_bridge_init(struct mlx5_eswitch *esw)
{
        struct mlx5_esw_bridge_offloads *br_offloads;

        br_offloads = kvzalloc(sizeof(*br_offloads), GFP_KERNEL);
        if (!br_offloads)
                return ERR_PTR(-ENOMEM);

        INIT_LIST_HEAD(&br_offloads->bridges);
        br_offloads->esw = esw;
        esw->br_offloads = br_offloads;

        return br_offloads;
}

void mlx5_esw_bridge_cleanup(struct mlx5_eswitch *esw)
{
        struct mlx5_esw_bridge_offloads *br_offloads = esw->br_offloads;

        if (!br_offloads)
                return;

        mlx5_esw_bridge_flush(br_offloads);

        esw->br_offloads = NULL;
        kvfree(br_offloads);
}