Merge tag 'for-netdev' of https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf...
[linux-2.6-microblaze.git] / drivers / net / ethernet / mellanox / mlxsw / spectrum.c
1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
3
4 #include <linux/kernel.h>
5 #include <linux/module.h>
6 #include <linux/types.h>
7 #include <linux/pci.h>
8 #include <linux/netdevice.h>
9 #include <linux/etherdevice.h>
10 #include <linux/ethtool.h>
11 #include <linux/slab.h>
12 #include <linux/device.h>
13 #include <linux/skbuff.h>
14 #include <linux/if_vlan.h>
15 #include <linux/if_bridge.h>
16 #include <linux/workqueue.h>
17 #include <linux/jiffies.h>
18 #include <linux/bitops.h>
19 #include <linux/list.h>
20 #include <linux/notifier.h>
21 #include <linux/dcbnl.h>
22 #include <linux/inetdevice.h>
23 #include <linux/netlink.h>
24 #include <linux/jhash.h>
25 #include <linux/log2.h>
26 #include <linux/refcount.h>
27 #include <linux/rhashtable.h>
28 #include <net/switchdev.h>
29 #include <net/pkt_cls.h>
30 #include <net/netevent.h>
31 #include <net/addrconf.h>
32 #include <linux/ptp_classify.h>
33
34 #include "spectrum.h"
35 #include "pci.h"
36 #include "core.h"
37 #include "core_env.h"
38 #include "reg.h"
39 #include "port.h"
40 #include "trap.h"
41 #include "txheader.h"
42 #include "spectrum_cnt.h"
43 #include "spectrum_dpipe.h"
44 #include "spectrum_acl_flex_actions.h"
45 #include "spectrum_span.h"
46 #include "spectrum_ptp.h"
47 #include "spectrum_trap.h"
48
/* Firmware revision components shared by all Spectrum generations. */
#define MLXSW_SP_FWREV_MINOR 2010
#define MLXSW_SP_FWREV_SUBMINOR 1006

#define MLXSW_SP1_FWREV_MAJOR 13
/* Minimum minor revision from which the device can be reset into the new
 * firmware without a power cycle — NOTE(review): inferred from the field
 * name; confirm against mlxsw_fw_rev users.
 */
#define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702

/* Required firmware revision for Spectrum-1. */
static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
        .major = MLXSW_SP1_FWREV_MAJOR,
        .minor = MLXSW_SP_FWREV_MINOR,
        .subminor = MLXSW_SP_FWREV_SUBMINOR,
        .can_reset_minor = MLXSW_SP1_FWREV_CAN_RESET_MINOR,
};

#define MLXSW_SP1_FW_FILENAME \
        "mellanox/mlxsw_spectrum-" __stringify(MLXSW_SP1_FWREV_MAJOR) \
        "." __stringify(MLXSW_SP_FWREV_MINOR) \
        "." __stringify(MLXSW_SP_FWREV_SUBMINOR) ".mfa2"

#define MLXSW_SP2_FWREV_MAJOR 29

/* Required firmware revision for Spectrum-2. */
static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = {
        .major = MLXSW_SP2_FWREV_MAJOR,
        .minor = MLXSW_SP_FWREV_MINOR,
        .subminor = MLXSW_SP_FWREV_SUBMINOR,
};

#define MLXSW_SP2_FW_FILENAME \
        "mellanox/mlxsw_spectrum2-" __stringify(MLXSW_SP2_FWREV_MAJOR) \
        "." __stringify(MLXSW_SP_FWREV_MINOR) \
        "." __stringify(MLXSW_SP_FWREV_SUBMINOR) ".mfa2"

#define MLXSW_SP3_FWREV_MAJOR 30

/* Required firmware revision for Spectrum-3. */
static const struct mlxsw_fw_rev mlxsw_sp3_fw_rev = {
        .major = MLXSW_SP3_FWREV_MAJOR,
        .minor = MLXSW_SP_FWREV_MINOR,
        .subminor = MLXSW_SP_FWREV_SUBMINOR,
};

#define MLXSW_SP3_FW_FILENAME \
        "mellanox/mlxsw_spectrum3-" __stringify(MLXSW_SP3_FWREV_MAJOR) \
        "." __stringify(MLXSW_SP_FWREV_MINOR) \
        "." __stringify(MLXSW_SP_FWREV_SUBMINOR) ".mfa2"

#define MLXSW_SP_LINECARDS_INI_BUNDLE_FILENAME \
        "mellanox/lc_ini_bundle_" \
        __stringify(MLXSW_SP_FWREV_MINOR) "_" \
        __stringify(MLXSW_SP_FWREV_SUBMINOR) ".bin"

static const char mlxsw_sp1_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp2_driver_name[] = "mlxsw_spectrum2";
static const char mlxsw_sp3_driver_name[] = "mlxsw_spectrum3";
static const char mlxsw_sp4_driver_name[] = "mlxsw_spectrum4";

/* Per-generation MAC address masks — presumably reflect the alignment of the
 * device's base MAC from which port addresses are derived; TODO confirm
 * against the callers of these masks.
 */
static const unsigned char mlxsw_sp1_mac_mask[ETH_ALEN] = {
        0xff, 0xff, 0xff, 0xff, 0xfc, 0x00
};
static const unsigned char mlxsw_sp2_mac_mask[ETH_ALEN] = {
        0xff, 0xff, 0xff, 0xff, 0xf0, 0x00
};
109
/* Tx header field definitions. Each MLXSW_ITEM32() invocation generates
 * accessors for one field of the software-built Tx header that is prepended
 * to every transmitted packet (arguments follow the reg.h item convention —
 * byte offset, LSB, bit width).
 */

/* tx_hdr_version
 * Tx header version.
 * Must be set to 1.
 */
MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);

/* tx_hdr_ctl
 * Packet control type.
 * 0 - Ethernet control (e.g. EMADs, LACP)
 * 1 - Ethernet data
 */
MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);

/* tx_hdr_proto
 * Packet protocol type. Must be set to 1 (Ethernet).
 */
MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);

/* tx_hdr_rx_is_router
 * Packet is sent from the router. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);

/* tx_hdr_fid_valid
 * Indicates if the 'fid' field is valid and should be used for
 * forwarding lookup. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);

/* tx_hdr_swid
 * Switch partition ID. Must be set to 0.
 */
MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);

/* tx_hdr_control_tclass
 * Indicates if the packet should use the control TClass and not one
 * of the data TClasses.
 */
MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);

/* tx_hdr_etclass
 * Egress TClass to be used on the egress device on the egress port.
 */
MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);

/* tx_hdr_port_mid
 * Destination local port for unicast packets.
 * Destination multicast ID for multicast packets.
 *
 * Control packets are directed to a specific egress port, while data
 * packets are transmitted through the CPU port (0) into the switch partition,
 * where forwarding rules are applied.
 */
MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);

/* tx_hdr_fid
 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
 * Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid, 0x08, 16, 16);

/* tx_hdr_type
 * 0 - Data packets
 * 6 - Control packets
 */
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
178 int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
179                               unsigned int counter_index, u64 *packets,
180                               u64 *bytes)
181 {
182         char mgpc_pl[MLXSW_REG_MGPC_LEN];
183         int err;
184
185         mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_NOP,
186                             MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
187         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
188         if (err)
189                 return err;
190         if (packets)
191                 *packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl);
192         if (bytes)
193                 *bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl);
194         return 0;
195 }
196
197 static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp,
198                                        unsigned int counter_index)
199 {
200         char mgpc_pl[MLXSW_REG_MGPC_LEN];
201
202         mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR,
203                             MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
204         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
205 }
206
207 int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
208                                 unsigned int *p_counter_index)
209 {
210         int err;
211
212         err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
213                                      p_counter_index);
214         if (err)
215                 return err;
216         err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index);
217         if (err)
218                 goto err_counter_clear;
219         return 0;
220
221 err_counter_clear:
222         mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
223                               *p_counter_index);
224         return err;
225 }
226
227 void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
228                                 unsigned int counter_index)
229 {
230          mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
231                                counter_index);
232 }
233
234 void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
235                               const struct mlxsw_tx_info *tx_info)
236 {
237         char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
238
239         memset(txhdr, 0, MLXSW_TXHDR_LEN);
240
241         mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
242         mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
243         mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
244         mlxsw_tx_hdr_swid_set(txhdr, 0);
245         mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
246         mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
247         mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
248 }
249
250 int
251 mlxsw_sp_txhdr_ptp_data_construct(struct mlxsw_core *mlxsw_core,
252                                   struct mlxsw_sp_port *mlxsw_sp_port,
253                                   struct sk_buff *skb,
254                                   const struct mlxsw_tx_info *tx_info)
255 {
256         char *txhdr;
257         u16 max_fid;
258         int err;
259
260         if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
261                 err = -ENOMEM;
262                 goto err_skb_cow_head;
263         }
264
265         if (!MLXSW_CORE_RES_VALID(mlxsw_core, FID)) {
266                 err = -EIO;
267                 goto err_res_valid;
268         }
269         max_fid = MLXSW_CORE_RES_GET(mlxsw_core, FID);
270
271         txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
272         memset(txhdr, 0, MLXSW_TXHDR_LEN);
273
274         mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
275         mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
276         mlxsw_tx_hdr_rx_is_router_set(txhdr, true);
277         mlxsw_tx_hdr_fid_valid_set(txhdr, true);
278         mlxsw_tx_hdr_fid_set(txhdr, max_fid + tx_info->local_port - 1);
279         mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_DATA);
280         return 0;
281
282 err_res_valid:
283 err_skb_cow_head:
284         this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
285         dev_kfree_skb_any(skb);
286         return err;
287 }
288
289 static bool mlxsw_sp_skb_requires_ts(struct sk_buff *skb)
290 {
291         unsigned int type;
292
293         if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
294                 return false;
295
296         type = ptp_classify_raw(skb);
297         return !!ptp_parse_header(skb, type);
298 }
299
300 static int mlxsw_sp_txhdr_handle(struct mlxsw_core *mlxsw_core,
301                                  struct mlxsw_sp_port *mlxsw_sp_port,
302                                  struct sk_buff *skb,
303                                  const struct mlxsw_tx_info *tx_info)
304 {
305         struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
306
307         /* In Spectrum-2 and Spectrum-3, PTP events that require a time stamp
308          * need special handling and cannot be transmitted as regular control
309          * packets.
310          */
311         if (unlikely(mlxsw_sp_skb_requires_ts(skb)))
312                 return mlxsw_sp->ptp_ops->txhdr_construct(mlxsw_core,
313                                                           mlxsw_sp_port, skb,
314                                                           tx_info);
315
316         if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
317                 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
318                 dev_kfree_skb_any(skb);
319                 return -ENOMEM;
320         }
321
322         mlxsw_sp_txhdr_construct(skb, tx_info);
323         return 0;
324 }
325
326 enum mlxsw_reg_spms_state mlxsw_sp_stp_spms_state(u8 state)
327 {
328         switch (state) {
329         case BR_STATE_FORWARDING:
330                 return MLXSW_REG_SPMS_STATE_FORWARDING;
331         case BR_STATE_LEARNING:
332                 return MLXSW_REG_SPMS_STATE_LEARNING;
333         case BR_STATE_LISTENING:
334         case BR_STATE_DISABLED:
335         case BR_STATE_BLOCKING:
336                 return MLXSW_REG_SPMS_STATE_DISCARDING;
337         default:
338                 BUG();
339         }
340 }
341
342 int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
343                               u8 state)
344 {
345         enum mlxsw_reg_spms_state spms_state = mlxsw_sp_stp_spms_state(state);
346         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
347         char *spms_pl;
348         int err;
349
350         spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
351         if (!spms_pl)
352                 return -ENOMEM;
353         mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
354         mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
355
356         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
357         kfree(spms_pl);
358         return err;
359 }
360
361 static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
362 {
363         char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
364         int err;
365
366         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
367         if (err)
368                 return err;
369         mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
370         return 0;
371 }
372
373 int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
374                                    bool is_up)
375 {
376         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
377         char paos_pl[MLXSW_REG_PAOS_LEN];
378
379         mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
380                             is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
381                             MLXSW_PORT_ADMIN_STATUS_DOWN);
382         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
383 }
384
385 static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
386                                       const unsigned char *addr)
387 {
388         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
389         char ppad_pl[MLXSW_REG_PPAD_LEN];
390
391         mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
392         mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
393         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
394 }
395
396 static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
397 {
398         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
399
400         eth_hw_addr_gen(mlxsw_sp_port->dev, mlxsw_sp->base_mac,
401                         mlxsw_sp_port->local_port);
402         return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port,
403                                           mlxsw_sp_port->dev->dev_addr);
404 }
405
406 static int mlxsw_sp_port_max_mtu_get(struct mlxsw_sp_port *mlxsw_sp_port, int *p_max_mtu)
407 {
408         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
409         char pmtu_pl[MLXSW_REG_PMTU_LEN];
410         int err;
411
412         mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
413         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
414         if (err)
415                 return err;
416
417         *p_max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);
418         return 0;
419 }
420
421 static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
422 {
423         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
424         char pmtu_pl[MLXSW_REG_PMTU_LEN];
425
426         mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
427         if (mtu > mlxsw_sp_port->max_mtu)
428                 return -EINVAL;
429
430         mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
431         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
432 }
433
434 static int mlxsw_sp_port_swid_set(struct mlxsw_sp *mlxsw_sp,
435                                   u16 local_port, u8 swid)
436 {
437         char pspa_pl[MLXSW_REG_PSPA_LEN];
438
439         mlxsw_reg_pspa_pack(pspa_pl, swid, local_port);
440         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
441 }
442
443 int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
444 {
445         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
446         char svpe_pl[MLXSW_REG_SVPE_LEN];
447
448         mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
449         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
450 }
451
452 int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
453                                    bool learn_enable)
454 {
455         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
456         char *spvmlr_pl;
457         int err;
458
459         spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
460         if (!spvmlr_pl)
461                 return -ENOMEM;
462         mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
463                               learn_enable);
464         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
465         kfree(spvmlr_pl);
466         return err;
467 }
468
469 int mlxsw_sp_port_security_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
470 {
471         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
472         char spfsr_pl[MLXSW_REG_SPFSR_LEN];
473         int err;
474
475         if (mlxsw_sp_port->security == enable)
476                 return 0;
477
478         mlxsw_reg_spfsr_pack(spfsr_pl, mlxsw_sp_port->local_port, enable);
479         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spfsr), spfsr_pl);
480         if (err)
481                 return err;
482
483         mlxsw_sp_port->security = enable;
484         return 0;
485 }
486
487 int mlxsw_sp_ethtype_to_sver_type(u16 ethtype, u8 *p_sver_type)
488 {
489         switch (ethtype) {
490         case ETH_P_8021Q:
491                 *p_sver_type = 0;
492                 break;
493         case ETH_P_8021AD:
494                 *p_sver_type = 1;
495                 break;
496         default:
497                 return -EINVAL;
498         }
499
500         return 0;
501 }
502
503 int mlxsw_sp_port_egress_ethtype_set(struct mlxsw_sp_port *mlxsw_sp_port,
504                                      u16 ethtype)
505 {
506         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
507         char spevet_pl[MLXSW_REG_SPEVET_LEN];
508         u8 sver_type;
509         int err;
510
511         err = mlxsw_sp_ethtype_to_sver_type(ethtype, &sver_type);
512         if (err)
513                 return err;
514
515         mlxsw_reg_spevet_pack(spevet_pl, mlxsw_sp_port->local_port, sver_type);
516         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spevet), spevet_pl);
517 }
518
519 static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
520                                     u16 vid, u16 ethtype)
521 {
522         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
523         char spvid_pl[MLXSW_REG_SPVID_LEN];
524         u8 sver_type;
525         int err;
526
527         err = mlxsw_sp_ethtype_to_sver_type(ethtype, &sver_type);
528         if (err)
529                 return err;
530
531         mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid,
532                              sver_type);
533
534         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
535 }
536
537 static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port,
538                                             bool allow)
539 {
540         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
541         char spaft_pl[MLXSW_REG_SPAFT_LEN];
542
543         mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow);
544         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl);
545 }
546
547 int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
548                            u16 ethtype)
549 {
550         int err;
551
552         if (!vid) {
553                 err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false);
554                 if (err)
555                         return err;
556         } else {
557                 err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid, ethtype);
558                 if (err)
559                         return err;
560                 err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, true);
561                 if (err)
562                         goto err_port_allow_untagged_set;
563         }
564
565         mlxsw_sp_port->pvid = vid;
566         return 0;
567
568 err_port_allow_untagged_set:
569         __mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid, ethtype);
570         return err;
571 }
572
573 static int
574 mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
575 {
576         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
577         char sspr_pl[MLXSW_REG_SSPR_LEN];
578
579         mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
580         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
581 }
582
/* Parse a queried PMLP payload into @port_mapping, validating that the
 * lane-to-module configuration is one the driver supports: a power-of-2
 * width, a single module and slot index across all lanes, identical TX/RX
 * lane numbers when they are reported separately, and sequential lane
 * numbers starting from the first lane. Returns 0 on success, -EINVAL
 * (with a logged error) on any unsupported configuration.
 */
static int
mlxsw_sp_port_module_info_parse(struct mlxsw_sp *mlxsw_sp,
				u16 local_port, char *pmlp_pl,
				struct mlxsw_sp_port_mapping *port_mapping)
{
	bool separate_rxtx;
	u8 first_lane;
	u8 slot_index;
	u8 module;
	u8 width;
	int i;

	/* Lane 0 values serve as the reference the other lanes must match. */
	module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
	slot_index = mlxsw_reg_pmlp_slot_index_get(pmlp_pl, 0);
	width = mlxsw_reg_pmlp_width_get(pmlp_pl);
	separate_rxtx = mlxsw_reg_pmlp_rxtx_get(pmlp_pl);
	first_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);

	/* Width 0 (unmapped port) is allowed; any non-zero width must be a
	 * power of 2.
	 */
	if (width && !is_power_of_2(width)) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: width value is not power of 2\n",
			local_port);
		return -EINVAL;
	}

	for (i = 0; i < width; i++) {
		if (mlxsw_reg_pmlp_module_get(pmlp_pl, i) != module) {
			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: contains multiple modules\n",
				local_port);
			return -EINVAL;
		}
		if (mlxsw_reg_pmlp_slot_index_get(pmlp_pl, i) != slot_index) {
			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: contains multiple slot indexes\n",
				local_port);
			return -EINVAL;
		}
		/* When TX and RX lanes are reported separately they must
		 * still agree.
		 */
		if (separate_rxtx &&
		    mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) !=
		    mlxsw_reg_pmlp_rx_lane_get(pmlp_pl, i)) {
			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are different\n",
				local_port);
			return -EINVAL;
		}
		if (mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) != i + first_lane) {
			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are not sequential\n",
				local_port);
			return -EINVAL;
		}
	}

	port_mapping->module = module;
	port_mapping->slot_index = slot_index;
	port_mapping->width = width;
	port_mapping->module_width = width;
	port_mapping->lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
	return 0;
}
639
640 static int
641 mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u16 local_port,
642                               struct mlxsw_sp_port_mapping *port_mapping)
643 {
644         char pmlp_pl[MLXSW_REG_PMLP_LEN];
645         int err;
646
647         mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
648         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
649         if (err)
650                 return err;
651         return mlxsw_sp_port_module_info_parse(mlxsw_sp, local_port,
652                                                pmlp_pl, port_mapping);
653 }
654
655 static int
656 mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u16 local_port,
657                          const struct mlxsw_sp_port_mapping *port_mapping)
658 {
659         char pmlp_pl[MLXSW_REG_PMLP_LEN];
660         int i, err;
661
662         mlxsw_env_module_port_map(mlxsw_sp->core, port_mapping->slot_index,
663                                   port_mapping->module);
664
665         mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
666         mlxsw_reg_pmlp_width_set(pmlp_pl, port_mapping->width);
667         for (i = 0; i < port_mapping->width; i++) {
668                 mlxsw_reg_pmlp_slot_index_set(pmlp_pl, i,
669                                               port_mapping->slot_index);
670                 mlxsw_reg_pmlp_module_set(pmlp_pl, i, port_mapping->module);
671                 mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, port_mapping->lane + i); /* Rx & Tx */
672         }
673
674         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
675         if (err)
676                 goto err_pmlp_write;
677         return 0;
678
679 err_pmlp_write:
680         mlxsw_env_module_port_unmap(mlxsw_sp->core, port_mapping->slot_index,
681                                     port_mapping->module);
682         return err;
683 }
684
685 static void mlxsw_sp_port_module_unmap(struct mlxsw_sp *mlxsw_sp, u16 local_port,
686                                        u8 slot_index, u8 module)
687 {
688         char pmlp_pl[MLXSW_REG_PMLP_LEN];
689
690         mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
691         mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
692         mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
693         mlxsw_env_module_port_unmap(mlxsw_sp->core, slot_index, module);
694 }
695
696 static int mlxsw_sp_port_open(struct net_device *dev)
697 {
698         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
699         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
700         int err;
701
702         err = mlxsw_env_module_port_up(mlxsw_sp->core,
703                                        mlxsw_sp_port->mapping.slot_index,
704                                        mlxsw_sp_port->mapping.module);
705         if (err)
706                 return err;
707         err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
708         if (err)
709                 goto err_port_admin_status_set;
710         netif_start_queue(dev);
711         return 0;
712
713 err_port_admin_status_set:
714         mlxsw_env_module_port_down(mlxsw_sp->core,
715                                    mlxsw_sp_port->mapping.slot_index,
716                                    mlxsw_sp_port->mapping.module);
717         return err;
718 }
719
720 static int mlxsw_sp_port_stop(struct net_device *dev)
721 {
722         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
723         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
724
725         netif_stop_queue(dev);
726         mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
727         mlxsw_env_module_port_down(mlxsw_sp->core,
728                                    mlxsw_sp_port->mapping.slot_index,
729                                    mlxsw_sp_port->mapping.module);
730         return 0;
731 }
732
/* ndo_start_xmit: transmit @skb through the switch ASIC. The skb is either
 * handed to the core, or freed here on padding/header/transmit failure —
 * except for NETDEV_TX_BUSY, where ownership stays with the stack for
 * requeueing. Per-CPU stats are updated accordingly.
 */
static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
	const struct mlxsw_tx_info tx_info = {
		.local_port = mlxsw_sp_port->local_port,
		.is_emad = false,
	};
	u64 len;
	int err;

	memset(skb->cb, 0, sizeof(struct mlxsw_skb_cb));

	if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
		return NETDEV_TX_BUSY;

	/* eth_skb_pad() frees the skb on failure. */
	if (eth_skb_pad(skb)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		return NETDEV_TX_OK;
	}

	/* On failure the skb was already freed and accounted as dropped. */
	err = mlxsw_sp_txhdr_handle(mlxsw_sp->core, mlxsw_sp_port, skb,
				    &tx_info);
	if (err)
		return NETDEV_TX_OK;

	/* TX header is consumed by HW on the way so we shouldn't count its
	 * bytes as being sent.
	 */
	len = skb->len - MLXSW_TXHDR_LEN;

	/* Due to a race we might fail here because of a full queue. In that
	 * unlikely case we simply drop the packet.
	 */
	err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);

	if (!err) {
		pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
783
/* ndo_set_rx_mode: intentionally empty — presumably RX filtering is
 * handled by the switch pipeline rather than per-netdev state; confirm
 * against the netdev_ops users before changing.
 */
static void mlxsw_sp_set_rx_mode(struct net_device *dev)
{
}
787
788 static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
789 {
790         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
791         struct sockaddr *addr = p;
792         int err;
793
794         if (!is_valid_ether_addr(addr->sa_data))
795                 return -EADDRNOTAVAIL;
796
797         err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
798         if (err)
799                 return err;
800         eth_hw_addr_set(dev, addr->sa_data);
801         return 0;
802 }
803
804 static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
805 {
806         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
807         struct mlxsw_sp_hdroom orig_hdroom;
808         struct mlxsw_sp_hdroom hdroom;
809         int err;
810
811         orig_hdroom = *mlxsw_sp_port->hdroom;
812
813         hdroom = orig_hdroom;
814         hdroom.mtu = mtu;
815         mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);
816
817         err = mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
818         if (err) {
819                 netdev_err(dev, "Failed to configure port's headroom\n");
820                 return err;
821         }
822
823         err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
824         if (err)
825                 goto err_port_mtu_set;
826         dev->mtu = mtu;
827         return 0;
828
829 err_port_mtu_set:
830         mlxsw_sp_hdroom_configure(mlxsw_sp_port, &orig_hdroom);
831         return err;
832 }
833
/* Sum the per-CPU software counters into @stats. The 64-bit counters are
 * read under the u64_stats seqcount retry loop so a consistent snapshot is
 * obtained even on 32-bit machines; tx_dropped is a plain u32 and is read
 * outside that protection.
 */
static int
mlxsw_sp_port_get_sw_stats64(const struct net_device *dev,
			     struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_pcpu_stats *p;
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
	u32 tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
		/* Retry until the writer did not interleave with us. */
		do {
			start = u64_stats_fetch_begin(&p->syncp);
			rx_packets	= p->rx_packets;
			rx_bytes	= p->rx_bytes;
			tx_packets	= p->tx_packets;
			tx_bytes	= p->tx_bytes;
		} while (u64_stats_fetch_retry(&p->syncp, start));

		stats->rx_packets	+= rx_packets;
		stats->rx_bytes		+= rx_bytes;
		stats->tx_packets	+= tx_packets;
		stats->tx_bytes		+= tx_bytes;
		/* tx_dropped is u32, updated without syncp protection. */
		tx_dropped	+= p->tx_dropped;
	}
	stats->tx_dropped	= tx_dropped;
	return 0;
}
865
866 static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev, int attr_id)
867 {
868         switch (attr_id) {
869         case IFLA_OFFLOAD_XSTATS_CPU_HIT:
870                 return true;
871         }
872
873         return false;
874 }
875
876 static int mlxsw_sp_port_get_offload_stats(int attr_id, const struct net_device *dev,
877                                            void *sp)
878 {
879         switch (attr_id) {
880         case IFLA_OFFLOAD_XSTATS_CPU_HIT:
881                 return mlxsw_sp_port_get_sw_stats64(dev, sp);
882         }
883
884         return -EINVAL;
885 }
886
/* Query the PPCNT register for the given counter group @grp and
 * priority/TC @prio. @ppcnt_pl must be MLXSW_REG_PPCNT_LEN bytes; on
 * success it holds the raw payload for the caller's unpack helpers.
 */
int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp,
				int prio, char *ppcnt_pl)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
	return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
}
896
897 static int mlxsw_sp_port_get_hw_stats(struct net_device *dev,
898                                       struct rtnl_link_stats64 *stats)
899 {
900         char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
901         int err;
902
903         err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT,
904                                           0, ppcnt_pl);
905         if (err)
906                 goto out;
907
908         stats->tx_packets =
909                 mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl);
910         stats->rx_packets =
911                 mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl);
912         stats->tx_bytes =
913                 mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl);
914         stats->rx_bytes =
915                 mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl);
916         stats->multicast =
917                 mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl);
918
919         stats->rx_crc_errors =
920                 mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl);
921         stats->rx_frame_errors =
922                 mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl);
923
924         stats->rx_length_errors = (
925                 mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) +
926                 mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) +
927                 mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl));
928
929         stats->rx_errors = (stats->rx_crc_errors +
930                 stats->rx_frame_errors + stats->rx_length_errors);
931
932 out:
933         return err;
934 }
935
/* Fill extended statistics (ECN marks, WRED/tail drops, backlog, per-prio
 * TX counters) from several PPCNT counter groups. Collection is
 * best-effort: a failed query for one group skips only the fields that
 * group provides and the remaining groups are still queried — hence the
 * tc_cnt label, which jumps past the TC congestion fields to the TC
 * counter query for the same traffic class.
 */
static void
mlxsw_sp_port_get_hw_xstats(struct net_device *dev,
			    struct mlxsw_sp_port_xstats *xstats)
{
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int err, i;

	err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_EXT_CNT, 0,
					  ppcnt_pl);
	if (!err)
		xstats->ecn = mlxsw_reg_ppcnt_ecn_marked_get(ppcnt_pl);

	for (i = 0; i < TC_MAX_QUEUE; i++) {
		err = mlxsw_sp_port_get_stats_raw(dev,
						  MLXSW_REG_PPCNT_TC_CONG_CNT,
						  i, ppcnt_pl);
		if (err)
			goto tc_cnt;

		xstats->wred_drop[i] =
			mlxsw_reg_ppcnt_wred_discard_get(ppcnt_pl);
		xstats->tc_ecn[i] = mlxsw_reg_ppcnt_ecn_marked_tc_get(ppcnt_pl);

tc_cnt:
		err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_TC_CNT,
						  i, ppcnt_pl);
		if (err)
			continue;

		xstats->backlog[i] =
			mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl);
		xstats->tail_drop[i] =
			mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get(ppcnt_pl);
	}

	/* Per-priority TX frame/octet counters. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_PRIO_CNT,
						  i, ppcnt_pl);
		if (err)
			continue;

		xstats->tx_packets[i] = mlxsw_reg_ppcnt_tx_frames_get(ppcnt_pl);
		xstats->tx_bytes[i] = mlxsw_reg_ppcnt_tx_octets_get(ppcnt_pl);
	}
}
981
982 static void update_stats_cache(struct work_struct *work)
983 {
984         struct mlxsw_sp_port *mlxsw_sp_port =
985                 container_of(work, struct mlxsw_sp_port,
986                              periodic_hw_stats.update_dw.work);
987
988         if (!netif_carrier_ok(mlxsw_sp_port->dev))
989                 /* Note: mlxsw_sp_port_down_wipe_counters() clears the cache as
990                  * necessary when port goes down.
991                  */
992                 goto out;
993
994         mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev,
995                                    &mlxsw_sp_port->periodic_hw_stats.stats);
996         mlxsw_sp_port_get_hw_xstats(mlxsw_sp_port->dev,
997                                     &mlxsw_sp_port->periodic_hw_stats.xstats);
998
999 out:
1000         mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw,
1001                                MLXSW_HW_STATS_UPDATE_TIME);
1002 }
1003
1004 /* Return the stats from a cache that is updated periodically,
1005  * as this function might get called in an atomic context.
1006  */
1007 static void
1008 mlxsw_sp_port_get_stats64(struct net_device *dev,
1009                           struct rtnl_link_stats64 *stats)
1010 {
1011         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1012
1013         memcpy(stats, &mlxsw_sp_port->periodic_hw_stats.stats, sizeof(*stats));
1014 }
1015
/* Program a single SPVM record covering [vid_begin, vid_end] for the port:
 * add or remove membership and set the untagged flag. The payload is
 * heap-allocated because MLXSW_REG_SPVM_LEN is too large for the stack.
 */
static int __mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 vid_begin, u16 vid_end,
				    bool is_member, bool untagged)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvm_pl;
	int err;

	spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
	if (!spvm_pl)
		return -ENOMEM;

	mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
			    vid_end, is_member, untagged);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
	kfree(spvm_pl);
	return err;
}
1034
/* Set VLAN membership for [vid_begin, vid_end], splitting the range into
 * chunks of at most MLXSW_REG_SPVM_REC_MAX_COUNT VIDs per register write.
 * Returns 0 on success; on failure, earlier chunks remain programmed.
 */
int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
			   u16 vid_end, bool is_member, bool untagged)
{
	u16 vid, vid_e;
	int err;

	for (vid = vid_begin; vid <= vid_end;
	     vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
		vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
			    vid_end);

		err = __mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
					       is_member, untagged);
		if (err)
			return err;
	}

	return 0;
}
1054
/* Destroy all VLAN entries on the port. The default VID is only flushed
 * when @flush_default is set (e.g. on port removal).
 */
static void mlxsw_sp_port_vlan_flush(struct mlxsw_sp_port *mlxsw_sp_port,
				     bool flush_default)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, *tmp;

	/* _safe iteration: destroy unlinks entries from vlans_list. */
	list_for_each_entry_safe(mlxsw_sp_port_vlan, tmp,
				 &mlxsw_sp_port->vlans_list, list) {
		if (!flush_default &&
		    mlxsw_sp_port_vlan->vid == MLXSW_SP_DEFAULT_VID)
			continue;
		mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
	}
}
1068
/* Detach the {port, VLAN} from whichever upper currently uses it: a bridge
 * port if joined to a bridge, otherwise the router if a FID is assigned.
 */
static void
mlxsw_sp_port_vlan_cleanup(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	if (mlxsw_sp_port_vlan->bridge_port)
		mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
	else if (mlxsw_sp_port_vlan->fid)
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
}
1077
/* Create a {port, VLAN} entry: program HW membership first, then allocate
 * and link the tracking structure. Returns the new entry, ERR_PTR(-EEXIST)
 * if the VID already exists on the port, or another ERR_PTR on failure
 * (HW membership is rolled back on allocation failure).
 */
struct mlxsw_sp_port_vlan *
mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	/* Only the default VID egresses untagged. */
	bool untagged = vid == MLXSW_SP_DEFAULT_VID;
	int err;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (mlxsw_sp_port_vlan)
		return ERR_PTR(-EEXIST);

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, untagged);
	if (err)
		return ERR_PTR(err);

	mlxsw_sp_port_vlan = kzalloc(sizeof(*mlxsw_sp_port_vlan), GFP_KERNEL);
	if (!mlxsw_sp_port_vlan) {
		err = -ENOMEM;
		goto err_port_vlan_alloc;
	}

	mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port;
	mlxsw_sp_port_vlan->vid = vid;
	list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list);

	return mlxsw_sp_port_vlan;

err_port_vlan_alloc:
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
	return ERR_PTR(err);
}
1109
/* Tear down a {port, VLAN} entry: detach from bridge/router, unlink and
 * free the tracking structure, then remove HW membership. The VID is
 * captured before the entry is freed.
 */
void mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	u16 vid = mlxsw_sp_port_vlan->vid;

	mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port_vlan);
	list_del(&mlxsw_sp_port_vlan->list);
	kfree(mlxsw_sp_port_vlan);
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
}
1120
1121 static int mlxsw_sp_port_add_vid(struct net_device *dev,
1122                                  __be16 __always_unused proto, u16 vid)
1123 {
1124         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1125
1126         /* VLAN 0 is added to HW filter when device goes up, but it is
1127          * reserved in our case, so simply return.
1128          */
1129         if (!vid)
1130                 return 0;
1131
1132         return PTR_ERR_OR_ZERO(mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid));
1133 }
1134
1135 static int mlxsw_sp_port_kill_vid(struct net_device *dev,
1136                                   __be16 __always_unused proto, u16 vid)
1137 {
1138         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1139         struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1140
1141         /* VLAN 0 is removed from HW filter when device goes down, but
1142          * it is reserved in our case, so simply return.
1143          */
1144         if (!vid)
1145                 return 0;
1146
1147         mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
1148         if (!mlxsw_sp_port_vlan)
1149                 return 0;
1150         mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
1151
1152         return 0;
1153 }
1154
/* Dispatch a flow block bind/unbind to the matching handler based on where
 * the block attaches: clsact ingress/egress or a RED qevent.
 */
static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct flow_block_offload *f)
{
	switch (f->binder_type) {
	case FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS:
		return mlxsw_sp_setup_tc_block_clsact(mlxsw_sp_port, f, true);
	case FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS:
		return mlxsw_sp_setup_tc_block_clsact(mlxsw_sp_port, f, false);
	case FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP:
		return mlxsw_sp_setup_tc_block_qevent_early_drop(mlxsw_sp_port, f);
	case FLOW_BLOCK_BINDER_TYPE_RED_MARK:
		return mlxsw_sp_setup_tc_block_qevent_mark(mlxsw_sp_port, f);
	default:
		return -EOPNOTSUPP;
	}
}
1171
/* ndo_setup_tc: dispatch TC offload requests (flow blocks and the
 * supported qdisc kinds) to the per-type handlers.
 */
static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type,
			     void *type_data)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	switch (type) {
	case TC_SETUP_BLOCK:
		return mlxsw_sp_setup_tc_block(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_RED:
		return mlxsw_sp_setup_tc_red(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_PRIO:
		return mlxsw_sp_setup_tc_prio(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_ETS:
		return mlxsw_sp_setup_tc_ets(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_TBF:
		return mlxsw_sp_setup_tc_tbf(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_FIFO:
		return mlxsw_sp_setup_tc_fifo(mlxsw_sp_port, type_data);
	default:
		return -EOPNOTSUPP;
	}
}
1194
1195 static int mlxsw_sp_feature_hw_tc(struct net_device *dev, bool enable)
1196 {
1197         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1198
1199         if (!enable) {
1200                 if (mlxsw_sp_flow_block_rule_count(mlxsw_sp_port->ing_flow_block) ||
1201                     mlxsw_sp_flow_block_rule_count(mlxsw_sp_port->eg_flow_block)) {
1202                         netdev_err(dev, "Active offloaded tc filters, can't turn hw_tc_offload off\n");
1203                         return -EINVAL;
1204                 }
1205                 mlxsw_sp_flow_block_disable_inc(mlxsw_sp_port->ing_flow_block);
1206                 mlxsw_sp_flow_block_disable_inc(mlxsw_sp_port->eg_flow_block);
1207         } else {
1208                 mlxsw_sp_flow_block_disable_dec(mlxsw_sp_port->ing_flow_block);
1209                 mlxsw_sp_flow_block_disable_dec(mlxsw_sp_port->eg_flow_block);
1210         }
1211         return 0;
1212 }
1213
/* NETIF_F_LOOPBACK handler: program physical loopback via the PPLR
 * register. A running port is administratively brought down around the
 * register write and brought back up afterwards, even if the write failed.
 */
static int mlxsw_sp_feature_loopback(struct net_device *dev, bool enable)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	char pplr_pl[MLXSW_REG_PPLR_LEN];
	int err;

	if (netif_running(dev))
		mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);

	mlxsw_reg_pplr_pack(pplr_pl, mlxsw_sp_port->local_port, enable);
	err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pplr),
			      pplr_pl);

	if (netif_running(dev))
		mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);

	return err;
}
1232
1233 typedef int (*mlxsw_sp_feature_handler)(struct net_device *dev, bool enable);
1234
1235 static int mlxsw_sp_handle_feature(struct net_device *dev,
1236                                    netdev_features_t wanted_features,
1237                                    netdev_features_t feature,
1238                                    mlxsw_sp_feature_handler feature_handler)
1239 {
1240         netdev_features_t changes = wanted_features ^ dev->features;
1241         bool enable = !!(wanted_features & feature);
1242         int err;
1243
1244         if (!(changes & feature))
1245                 return 0;
1246
1247         err = feature_handler(dev, enable);
1248         if (err) {
1249                 netdev_err(dev, "%s feature %pNF failed, err %d\n",
1250                            enable ? "Enable" : "Disable", &feature, err);
1251                 return err;
1252         }
1253
1254         if (enable)
1255                 dev->features |= feature;
1256         else
1257                 dev->features &= ~feature;
1258
1259         return 0;
1260 }
/* ndo_set_features: apply all supported feature toggles. Errors are
 * accumulated with |= (each handler returns 0 or a negative errno), so all
 * handlers run even if an earlier one failed; any failure restores the
 * saved feature mask and reports -EINVAL.
 */
static int mlxsw_sp_set_features(struct net_device *dev,
				 netdev_features_t features)
{
	netdev_features_t oper_features = dev->features;
	int err = 0;

	err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_HW_TC,
				       mlxsw_sp_feature_hw_tc);
	err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_LOOPBACK,
				       mlxsw_sp_feature_loopback);

	if (err) {
		dev->features = oper_features;
		return -EINVAL;
	}

	return 0;
}
1279
1280 static int mlxsw_sp_port_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
1281                                       struct ifreq *ifr)
1282 {
1283         struct hwtstamp_config config;
1284         int err;
1285
1286         if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
1287                 return -EFAULT;
1288
1289         err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port,
1290                                                              &config);
1291         if (err)
1292                 return err;
1293
1294         if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
1295                 return -EFAULT;
1296
1297         return 0;
1298 }
1299
1300 static int mlxsw_sp_port_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
1301                                       struct ifreq *ifr)
1302 {
1303         struct hwtstamp_config config;
1304         int err;
1305
1306         err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_get(mlxsw_sp_port,
1307                                                              &config);
1308         if (err)
1309                 return err;
1310
1311         if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
1312                 return -EFAULT;
1313
1314         return 0;
1315 }
1316
/* Reset the port's hardware timestamping to an all-zero (disabled)
 * configuration; the return value is deliberately ignored.
 */
static inline void mlxsw_sp_port_ptp_clear(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct hwtstamp_config config = {0};

	mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port, &config);
}
1323
/* ndo_eth_ioctl: only the hardware timestamping ioctls are supported. */
static int
mlxsw_sp_port_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	switch (cmd) {
	case SIOCSHWTSTAMP:
		return mlxsw_sp_port_hwtstamp_set(mlxsw_sp_port, ifr);
	case SIOCGHWTSTAMP:
		return mlxsw_sp_port_hwtstamp_get(mlxsw_sp_port, ifr);
	default:
		return -EOPNOTSUPP;
	}
}
1338
/* Netdevice operations for Spectrum front-panel ports. */
static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
	.ndo_open		= mlxsw_sp_port_open,
	.ndo_stop		= mlxsw_sp_port_stop,
	.ndo_start_xmit		= mlxsw_sp_port_xmit,
	.ndo_setup_tc		= mlxsw_sp_setup_tc,
	.ndo_set_rx_mode	= mlxsw_sp_set_rx_mode,
	.ndo_set_mac_address	= mlxsw_sp_port_set_mac_address,
	.ndo_change_mtu		= mlxsw_sp_port_change_mtu,
	.ndo_get_stats64	= mlxsw_sp_port_get_stats64,
	.ndo_has_offload_stats	= mlxsw_sp_port_has_offload_stats,
	.ndo_get_offload_stats	= mlxsw_sp_port_get_offload_stats,
	.ndo_vlan_rx_add_vid	= mlxsw_sp_port_add_vid,
	.ndo_vlan_rx_kill_vid	= mlxsw_sp_port_kill_vid,
	.ndo_set_features	= mlxsw_sp_set_features,
	.ndo_eth_ioctl		= mlxsw_sp_port_ioctl,
};
1355
/* Advertise the intersection of driver-supported and device-supported
 * speeds via PTYS: query the capability mask, mask it by what the driver
 * handles, and write the result back honoring the port's autoneg setting.
 */
static int
mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u32 eth_proto_cap, eth_proto_admin, eth_proto_oper;
	const struct mlxsw_sp_port_type_speed_ops *ops;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_cap_masked;
	int err;

	ops = mlxsw_sp->port_type_speed_ops;

	/* Set advertised speeds to speeds supported by both the driver
	 * and the device.
	 */
	ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
			       0, false);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err)
		return err;

	ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, &eth_proto_cap,
				 &eth_proto_admin, &eth_proto_oper);
	eth_proto_cap_masked = ops->ptys_proto_cap_masked_get(eth_proto_cap);
	ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
			       eth_proto_cap_masked,
			       mlxsw_sp_port->link.autoneg);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
}
1385
/* Read the port's operational speed: query PTYS, unpack the operational
 * protocol mask and translate it to a speed value in @speed.
 */
int mlxsw_sp_port_speed_get(struct mlxsw_sp_port *mlxsw_sp_port, u32 *speed)
{
	const struct mlxsw_sp_port_type_speed_ops *port_type_speed_ops;
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_oper;
	int err;

	port_type_speed_ops = mlxsw_sp->port_type_speed_ops;
	port_type_speed_ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl,
					       mlxsw_sp_port->local_port, 0,
					       false);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err)
		return err;
	port_type_speed_ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, NULL, NULL,
						 &eth_proto_oper);
	*speed = port_type_speed_ops->from_ptys_speed(mlxsw_sp, eth_proto_oper);
	return 0;
}
1406
/* Configure an ETS scheduling element via QEEC: link element @index at
 * hierarchy level @hr under @next_index, with optional DWRR weighting.
 */
int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
			  enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
			  bool dwrr, u8 dwrr_weight)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_de_set(qeec_pl, true);
	mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
	mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}
1421
/* Set the max shaper (rate @maxrate, burst size @burst_size) on the QEEC
 * scheduling element identified by @hr/@index/@next_index.
 */
int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
				  enum mlxsw_reg_qeec_hr hr, u8 index,
				  u8 next_index, u32 maxrate, u8 burst_size)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_mase_set(qeec_pl, true);
	mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
	mlxsw_reg_qeec_max_shaper_bs_set(qeec_pl, burst_size);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}
1436
/* Set the min shaper (guaranteed rate @minrate) on the QEEC scheduling
 * element identified by @hr/@index/@next_index.
 */
static int mlxsw_sp_port_min_bw_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    enum mlxsw_reg_qeec_hr hr, u8 index,
				    u8 next_index, u32 minrate)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_mise_set(qeec_pl, true);
	mlxsw_reg_qeec_min_shaper_rate_set(qeec_pl, minrate);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}
1451
/* Map switch priority @switch_prio to traffic class @tclass via QTCT. */
int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
			      u8 switch_prio, u8 tclass)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qtct_pl[MLXSW_REG_QTCT_LEN];

	mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
			    tclass);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
}
1462
/* Initial ETS configuration for a port: build the scheduling element
 * hierarchy, disable all max shapers, set the min shaper for multicast
 * TCs (indices 8..15, paired with unicast TCs 0..7), and map every
 * priority to TC 0. Returns 0 or the first register-write error.
 */
static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err, i;

	/* Setup the elements hierarchy, so that each TC is linked to
	 * one subgroup, which are all member in the same group.
	 */
	err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
				    MLXSW_REG_QEEC_HR_GROUP, 0, 0, false, 0);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_SUBGROUP, i,
					    0, false, 0);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_TC, i, i,
					    false, 0);
		if (err)
			return err;

		/* TC i + 8 is the multicast TC paired with unicast TC i. */
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_TC,
					    i + 8, i,
					    true, 100);
		if (err)
			return err;
	}

	/* Make sure the max shaper is disabled in all hierarchies that support
	 * it. Note that this disables ptps (PTP shaper), but that is intended
	 * for the initial configuration.
	 */
	err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_PORT, 0, 0,
					    MLXSW_REG_QEEC_MAS_DIS, 0);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HR_SUBGROUP,
						    i, 0,
						    MLXSW_REG_QEEC_MAS_DIS, 0);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HR_TC,
						    i, i,
						    MLXSW_REG_QEEC_MAS_DIS, 0);
		if (err)
			return err;

		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HR_TC,
						    i + 8, i,
						    MLXSW_REG_QEEC_MAS_DIS, 0);
		if (err)
			return err;
	}

	/* Configure the min shaper for multicast TCs. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_min_bw_set(mlxsw_sp_port,
					       MLXSW_REG_QEEC_HR_TC,
					       i + 8, i,
					       MLXSW_REG_QEEC_MIS_MIN);
		if (err)
			return err;
	}

	/* Map all priorities to traffic class 0. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
		if (err)
			return err;
	}

	return 0;
}
1548
/* Enable or disable multicast-aware TC mapping mode on the port (QTCTM). */
static int mlxsw_sp_port_tc_mc_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
					bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qtctm_pl[MLXSW_REG_QTCTM_LEN];

	mlxsw_reg_qtctm_pack(qtctm_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtctm), qtctm_pl);
}
1558
1559 static int mlxsw_sp_port_overheat_init_val_set(struct mlxsw_sp_port *mlxsw_sp_port)
1560 {
1561         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1562         u8 slot_index = mlxsw_sp_port->mapping.slot_index;
1563         u8 module = mlxsw_sp_port->mapping.module;
1564         u64 overheat_counter;
1565         int err;
1566
1567         err = mlxsw_env_module_overheat_counter_get(mlxsw_sp->core, slot_index,
1568                                                     module, &overheat_counter);
1569         if (err)
1570                 return err;
1571
1572         mlxsw_sp_port->module_overheat_initial_val = overheat_counter;
1573         return 0;
1574 }
1575
/* Configure which ethertypes (802.1ad and/or 802.1q) the port treats as
 * VLAN-tagged for ingress classification, via the SPVC register.
 */
int
mlxsw_sp_port_vlan_classification_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      bool is_8021ad_tagged,
				      bool is_8021q_tagged)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spvc_pl[MLXSW_REG_SPVC_LEN];

	mlxsw_reg_spvc_pack(spvc_pl, mlxsw_sp_port->local_port,
			    is_8021ad_tagged, is_8021q_tagged);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvc), spvc_pl);
}
1588
/* Query the PLLP register for the front-panel label of @local_port:
 * panel port number, split sub-port number and slot index.
 */
static int mlxsw_sp_port_label_info_get(struct mlxsw_sp *mlxsw_sp,
					u16 local_port, u8 *port_number,
					u8 *split_port_subnumber,
					u8 *slot_index)
{
	char pllp_pl[MLXSW_REG_PLLP_LEN];
	int err;

	mlxsw_reg_pllp_pack(pllp_pl, local_port);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pllp), pllp_pl);
	if (err)
		return err;
	mlxsw_reg_pllp_unpack(pllp_pl, port_number,
			      split_port_subnumber, slot_index);
	return 0;
}
1605
/* Create and register a netdev for a single front-panel port and fully
 * initialize it: module mapping, SWID, core port, netdev allocation,
 * speeds, MTU, buffers, ETS, DCB, FIDs, qdiscs, NVE and VLANs.
 *
 * @mlxsw_sp: driver instance
 * @local_port: device-local port number being created
 * @split: true when the port is a member of a split group
 * @port_mapping: module/lane mapping to apply to the port
 *
 * Returns 0 on success or a negative errno. On failure, all steps
 * performed so far are unwound in reverse order via the goto ladder at
 * the bottom; the ordering of the error labels must mirror the init
 * sequence exactly.
 */
static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u16 local_port,
				bool split,
				struct mlxsw_sp_port_mapping *port_mapping)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_port *mlxsw_sp_port;
	u32 lanes = port_mapping->width;
	u8 split_port_subnumber;
	struct net_device *dev;
	u8 port_number;
	u8 slot_index;
	bool splittable;
	int err;

	err = mlxsw_sp_port_module_map(mlxsw_sp, local_port, port_mapping);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to map module\n",
			local_port);
		return err;
	}

	err = mlxsw_sp_port_swid_set(mlxsw_sp, local_port, 0);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
			local_port);
		goto err_port_swid_set;
	}

	err = mlxsw_sp_port_label_info_get(mlxsw_sp, local_port, &port_number,
					   &split_port_subnumber, &slot_index);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get port label information\n",
			local_port);
		goto err_port_label_info_get;
	}

	/* A port occupying more than one lane and not already split can
	 * be split further.
	 */
	splittable = lanes > 1 && !split;
	err = mlxsw_core_port_init(mlxsw_sp->core, local_port, slot_index,
				   port_number, split, split_port_subnumber,
				   splittable, lanes, mlxsw_sp->base_mac,
				   sizeof(mlxsw_sp->base_mac));
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
			local_port);
		goto err_core_port_init;
	}

	dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
	if (!dev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}
	SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev);
	dev_net_set(dev, mlxsw_sp_net(mlxsw_sp));
	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_core_port_netdev_link(mlxsw_sp->core, local_port,
				    mlxsw_sp_port, dev);
	mlxsw_sp_port->dev = dev;
	mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
	mlxsw_sp_port->local_port = local_port;
	mlxsw_sp_port->pvid = MLXSW_SP_DEFAULT_VID;
	mlxsw_sp_port->split = split;
	mlxsw_sp_port->mapping = *port_mapping;
	mlxsw_sp_port->link.autoneg = 1;
	INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list);

	mlxsw_sp_port->pcpu_stats =
		netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
	if (!mlxsw_sp_port->pcpu_stats) {
		err = -ENOMEM;
		goto err_alloc_stats;
	}

	INIT_DELAYED_WORK(&mlxsw_sp_port->periodic_hw_stats.update_dw,
			  &update_stats_cache);

	dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
	dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;

	err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
			mlxsw_sp_port->local_port);
		goto err_dev_addr_init;
	}

	netif_carrier_off(dev);

	dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
			 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
	dev->hw_features |= NETIF_F_HW_TC | NETIF_F_LOOPBACK;

	dev->min_mtu = 0;
	dev->max_mtu = ETH_MAX_MTU;

	/* Each packet needs to have a Tx header (metadata) on top all other
	 * headers.
	 */
	dev->needed_headroom = MLXSW_TXHDR_LEN;

	err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
			mlxsw_sp_port->local_port);
		goto err_port_system_port_mapping_set;
	}

	err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
			mlxsw_sp_port->local_port);
		goto err_port_speed_by_width_set;
	}

	err = mlxsw_sp->port_type_speed_ops->ptys_max_speed(mlxsw_sp_port,
							    &mlxsw_sp_port->max_speed);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get maximum speed\n",
			mlxsw_sp_port->local_port);
		goto err_max_speed_get;
	}

	err = mlxsw_sp_port_max_mtu_get(mlxsw_sp_port, &mlxsw_sp_port->max_mtu);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get maximum MTU\n",
			mlxsw_sp_port->local_port);
		goto err_port_max_mtu_get;
	}

	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
			mlxsw_sp_port->local_port);
		goto err_port_mtu_set;
	}

	/* Port starts administratively down; user brings it up later. */
	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	if (err)
		goto err_port_admin_status_set;

	err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
			mlxsw_sp_port->local_port);
		goto err_port_buffers_init;
	}

	err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
			mlxsw_sp_port->local_port);
		goto err_port_ets_init;
	}

	err = mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, true);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC MC mode\n",
			mlxsw_sp_port->local_port);
		goto err_port_tc_mc_mode;
	}

	/* ETS and buffers must be initialized before DCB. */
	err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
			mlxsw_sp_port->local_port);
		goto err_port_dcb_init;
	}

	err = mlxsw_sp_port_fids_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize FIDs\n",
			mlxsw_sp_port->local_port);
		goto err_port_fids_init;
	}

	err = mlxsw_sp_tc_qdisc_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC qdiscs\n",
			mlxsw_sp_port->local_port);
		goto err_port_qdiscs_init;
	}

	/* Start with all VIDs removed from the port's VLAN filter. */
	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 0, VLAN_N_VID - 1, false,
				     false);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to clear VLAN filter\n",
			mlxsw_sp_port->local_port);
		goto err_port_vlan_clear;
	}

	err = mlxsw_sp_port_nve_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize NVE\n",
			mlxsw_sp_port->local_port);
		goto err_port_nve_init;
	}

	err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID,
				     ETH_P_8021Q);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set PVID\n",
			mlxsw_sp_port->local_port);
		goto err_port_pvid_set;
	}

	/* The VID created here is MLXSW_SP_DEFAULT_VID. */
	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_create(mlxsw_sp_port,
						       MLXSW_SP_DEFAULT_VID);
	if (IS_ERR(mlxsw_sp_port_vlan)) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n",
			mlxsw_sp_port->local_port);
		err = PTR_ERR(mlxsw_sp_port_vlan);
		goto err_port_vlan_create;
	}
	mlxsw_sp_port->default_vlan = mlxsw_sp_port_vlan;

	/* Set SPVC.et0=true and SPVC.et1=false to make the local port to treat
	 * only packets with 802.1q header as tagged packets.
	 */
	err = mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, false, true);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set default VLAN classification\n",
			local_port);
		goto err_port_vlan_classification_set;
	}

	INIT_DELAYED_WORK(&mlxsw_sp_port->ptp.shaper_dw,
			  mlxsw_sp->ptp_ops->shaper_work);

	mlxsw_sp->ports[local_port] = mlxsw_sp_port;

	err = mlxsw_sp_port_overheat_init_val_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set overheat initial value\n",
			mlxsw_sp_port->local_port);
		goto err_port_overheat_init_val_set;
	}

	err = register_netdev(dev);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
			mlxsw_sp_port->local_port);
		goto err_register_netdev;
	}

	mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw, 0);
	return 0;

	/* Error unwind: labels run in reverse order of the init sequence
	 * above; each label undoes the step that succeeded before the
	 * failing one.
	 */
err_register_netdev:
err_port_overheat_init_val_set:
	mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, true, true);
err_port_vlan_classification_set:
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
err_port_vlan_create:
err_port_pvid_set:
	mlxsw_sp_port_nve_fini(mlxsw_sp_port);
err_port_nve_init:
err_port_vlan_clear:
	mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
err_port_qdiscs_init:
	mlxsw_sp_port_fids_fini(mlxsw_sp_port);
err_port_fids_init:
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
err_port_dcb_init:
	mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
err_port_tc_mc_mode:
err_port_ets_init:
	mlxsw_sp_port_buffers_fini(mlxsw_sp_port);
err_port_buffers_init:
err_port_admin_status_set:
err_port_mtu_set:
err_port_max_mtu_get:
err_max_speed_get:
err_port_speed_by_width_set:
err_port_system_port_mapping_set:
err_dev_addr_init:
	free_percpu(mlxsw_sp_port->pcpu_stats);
err_alloc_stats:
	free_netdev(dev);
err_alloc_etherdev:
	mlxsw_core_port_fini(mlxsw_sp->core, local_port);
err_core_port_init:
err_port_label_info_get:
	mlxsw_sp_port_swid_set(mlxsw_sp, local_port,
			       MLXSW_PORT_SWID_DISABLED_PORT);
err_port_swid_set:
	mlxsw_sp_port_module_unmap(mlxsw_sp, local_port,
				   port_mapping->slot_index,
				   port_mapping->module);
	return err;
}
1898
/* Tear down a front-panel port created by mlxsw_sp_port_create(). The
 * teardown order mirrors creation in reverse and must be preserved.
 */
static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u16 local_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
	u8 slot_index = mlxsw_sp_port->mapping.slot_index;
	u8 module = mlxsw_sp_port->mapping.module;

	/* Stop deferred work before the netdev goes away. */
	cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw);
	cancel_delayed_work_sync(&mlxsw_sp_port->ptp.shaper_dw);
	unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
	mlxsw_sp_port_ptp_clear(mlxsw_sp_port);
	/* Treat both 802.1ad and 802.1q packets as tagged again. */
	mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, true, true);
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_sp_port_vlan_flush(mlxsw_sp_port, true);
	mlxsw_sp_port_nve_fini(mlxsw_sp_port);
	mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
	mlxsw_sp_port_fids_fini(mlxsw_sp_port);
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
	mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
	mlxsw_sp_port_buffers_fini(mlxsw_sp_port);
	free_percpu(mlxsw_sp_port->pcpu_stats);
	/* All port VLANs should have been flushed above. */
	WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list));
	free_netdev(mlxsw_sp_port->dev);
	mlxsw_core_port_fini(mlxsw_sp->core, local_port);
	mlxsw_sp_port_swid_set(mlxsw_sp, local_port,
			       MLXSW_PORT_SWID_DISABLED_PORT);
	mlxsw_sp_port_module_unmap(mlxsw_sp, local_port, slot_index, module);
}
1926
1927 static int mlxsw_sp_cpu_port_create(struct mlxsw_sp *mlxsw_sp)
1928 {
1929         struct mlxsw_sp_port *mlxsw_sp_port;
1930         int err;
1931
1932         mlxsw_sp_port = kzalloc(sizeof(*mlxsw_sp_port), GFP_KERNEL);
1933         if (!mlxsw_sp_port)
1934                 return -ENOMEM;
1935
1936         mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
1937         mlxsw_sp_port->local_port = MLXSW_PORT_CPU_PORT;
1938
1939         err = mlxsw_core_cpu_port_init(mlxsw_sp->core,
1940                                        mlxsw_sp_port,
1941                                        mlxsw_sp->base_mac,
1942                                        sizeof(mlxsw_sp->base_mac));
1943         if (err) {
1944                 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize core CPU port\n");
1945                 goto err_core_cpu_port_init;
1946         }
1947
1948         mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] = mlxsw_sp_port;
1949         return 0;
1950
1951 err_core_cpu_port_init:
1952         kfree(mlxsw_sp_port);
1953         return err;
1954 }
1955
1956 static void mlxsw_sp_cpu_port_remove(struct mlxsw_sp *mlxsw_sp)
1957 {
1958         struct mlxsw_sp_port *mlxsw_sp_port =
1959                                 mlxsw_sp->ports[MLXSW_PORT_CPU_PORT];
1960
1961         mlxsw_core_cpu_port_fini(mlxsw_sp->core);
1962         mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] = NULL;
1963         kfree(mlxsw_sp_port);
1964 }
1965
1966 static bool mlxsw_sp_local_port_valid(u16 local_port)
1967 {
1968         return local_port != MLXSW_PORT_CPU_PORT;
1969 }
1970
1971 static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u16 local_port)
1972 {
1973         if (!mlxsw_sp_local_port_valid(local_port))
1974                 return false;
1975         return mlxsw_sp->ports[local_port] != NULL;
1976 }
1977
1978 static int mlxsw_sp_port_mapping_event_set(struct mlxsw_sp *mlxsw_sp,
1979                                            u16 local_port, bool enable)
1980 {
1981         char pmecr_pl[MLXSW_REG_PMECR_LEN];
1982
1983         mlxsw_reg_pmecr_pack(pmecr_pl, local_port,
1984                              enable ? MLXSW_REG_PMECR_E_GENERATE_EVENT :
1985                                       MLXSW_REG_PMECR_E_DO_NOT_GENERATE_EVENT);
1986         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmecr), pmecr_pl);
1987 }
1988
/* A single queued port mapping change event, carrying a copy of the
 * PMLP register payload from the trap handler to process context.
 */
struct mlxsw_sp_port_mapping_event {
	struct list_head list;	/* node in the events queue */
	char pmlp_pl[MLXSW_REG_PMLP_LEN];	/* copied PMLP payload */
};
1993
/* Work handler that processes queued port mapping (PMLP) events in
 * process context. The shared queue is spliced off under the lock in
 * one shot, then each event is parsed and acted upon.
 */
static void mlxsw_sp_port_mapping_events_work(struct work_struct *work)
{
	struct mlxsw_sp_port_mapping_event *event, *next_event;
	struct mlxsw_sp_port_mapping_events *events;
	struct mlxsw_sp_port_mapping port_mapping;
	struct mlxsw_sp *mlxsw_sp;
	struct devlink *devlink;
	LIST_HEAD(event_queue);
	u16 local_port;
	int err;

	events = container_of(work, struct mlxsw_sp_port_mapping_events, work);
	mlxsw_sp = container_of(events, struct mlxsw_sp, port_mapping_events);
	devlink = priv_to_devlink(mlxsw_sp->core);

	/* Take the whole queue at once; the trap handler may keep
	 * appending new events concurrently.
	 */
	spin_lock_bh(&events->queue_lock);
	list_splice_init(&events->queue, &event_queue);
	spin_unlock_bh(&events->queue_lock);

	list_for_each_entry_safe(event, next_event, &event_queue, list) {
		local_port = mlxsw_reg_pmlp_local_port_get(event->pmlp_pl);
		err = mlxsw_sp_port_module_info_parse(mlxsw_sp, local_port,
						      event->pmlp_pl, &port_mapping);
		if (err)
			goto out;

		/* A mapping event without any lanes is unexpected. */
		if (WARN_ON_ONCE(!port_mapping.width))
			goto out;

		devl_lock(devlink);

		/* An event for an already-created port is unexpected. */
		if (!mlxsw_sp_port_created(mlxsw_sp, local_port))
			mlxsw_sp_port_create(mlxsw_sp, local_port,
					     false, &port_mapping);
		else
			WARN_ON_ONCE(1);

		devl_unlock(devlink);

		/* Cache the parsed mapping for later re-creation. */
		mlxsw_sp->port_mapping[local_port] = port_mapping;

out:
		kfree(event);
	}
}
2039
2040 static void
2041 mlxsw_sp_port_mapping_listener_func(const struct mlxsw_reg_info *reg,
2042                                     char *pmlp_pl, void *priv)
2043 {
2044         struct mlxsw_sp_port_mapping_events *events;
2045         struct mlxsw_sp_port_mapping_event *event;
2046         struct mlxsw_sp *mlxsw_sp = priv;
2047         u16 local_port;
2048
2049         local_port = mlxsw_reg_pmlp_local_port_get(pmlp_pl);
2050         if (WARN_ON_ONCE(!mlxsw_sp_local_port_is_valid(mlxsw_sp, local_port)))
2051                 return;
2052
2053         events = &mlxsw_sp->port_mapping_events;
2054         event = kmalloc(sizeof(*event), GFP_ATOMIC);
2055         if (!event)
2056                 return;
2057         memcpy(event->pmlp_pl, pmlp_pl, sizeof(event->pmlp_pl));
2058         spin_lock(&events->queue_lock);
2059         list_add_tail(&event->list, &events->queue);
2060         spin_unlock(&events->queue_lock);
2061         mlxsw_core_schedule_work(&events->work);
2062 }
2063
2064 static void
2065 __mlxsw_sp_port_mapping_events_cancel(struct mlxsw_sp *mlxsw_sp)
2066 {
2067         struct mlxsw_sp_port_mapping_event *event, *next_event;
2068         struct mlxsw_sp_port_mapping_events *events;
2069
2070         events = &mlxsw_sp->port_mapping_events;
2071
2072         /* Caller needs to make sure that no new event is going to appear. */
2073         cancel_work_sync(&events->work);
2074         list_for_each_entry_safe(event, next_event, &events->queue, list) {
2075                 list_del(&event->list);
2076                 kfree(event);
2077         }
2078 }
2079
2080 static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
2081 {
2082         unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
2083         int i;
2084
2085         for (i = 1; i < max_ports; i++)
2086                 mlxsw_sp_port_mapping_event_set(mlxsw_sp, i, false);
2087         /* Make sure all scheduled events are processed */
2088         __mlxsw_sp_port_mapping_events_cancel(mlxsw_sp);
2089
2090         for (i = 1; i < max_ports; i++)
2091                 if (mlxsw_sp_port_created(mlxsw_sp, i))
2092                         mlxsw_sp_port_remove(mlxsw_sp, i);
2093         mlxsw_sp_cpu_port_remove(mlxsw_sp);
2094         kfree(mlxsw_sp->ports);
2095         mlxsw_sp->ports = NULL;
2096 }
2097
2098 static void
2099 mlxsw_sp_ports_remove_selected(struct mlxsw_core *mlxsw_core,
2100                                bool (*selector)(void *priv, u16 local_port),
2101                                void *priv)
2102 {
2103         struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
2104         unsigned int max_ports = mlxsw_core_max_ports(mlxsw_core);
2105         int i;
2106
2107         for (i = 1; i < max_ports; i++)
2108                 if (mlxsw_sp_port_created(mlxsw_sp, i) && selector(priv, i))
2109                         mlxsw_sp_port_remove(mlxsw_sp, i);
2110 }
2111
/* Create all ports: the ports array, the mapping events machinery, the
 * CPU port and every front-panel port that has a module mapping.
 *
 * Returns 0 on success or a negative errno; on failure everything
 * created so far is unwound.
 */
static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
{
	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
	struct mlxsw_sp_port_mapping_events *events;
	struct mlxsw_sp_port_mapping *port_mapping;
	size_t alloc_size;
	int i;
	int err;

	alloc_size = sizeof(struct mlxsw_sp_port *) * max_ports;
	mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
	if (!mlxsw_sp->ports)
		return -ENOMEM;

	events = &mlxsw_sp->port_mapping_events;
	INIT_LIST_HEAD(&events->queue);
	spin_lock_init(&events->queue_lock);
	INIT_WORK(&events->work, mlxsw_sp_port_mapping_events_work);

	/* Enable mapping-change event generation for every local port. */
	for (i = 1; i < max_ports; i++) {
		err = mlxsw_sp_port_mapping_event_set(mlxsw_sp, i, true);
		if (err)
			goto err_event_enable;
	}

	err = mlxsw_sp_cpu_port_create(mlxsw_sp);
	if (err)
		goto err_cpu_port_create;

	/* Only ports with a known module mapping are created here;
	 * others may appear later via mapping events.
	 */
	for (i = 1; i < max_ports; i++) {
		port_mapping = &mlxsw_sp->port_mapping[i];
		if (!port_mapping->width)
			continue;
		err = mlxsw_sp_port_create(mlxsw_sp, i, false, port_mapping);
		if (err)
			goto err_port_create;
	}
	return 0;

err_port_create:
	for (i--; i >= 1; i--)
		if (mlxsw_sp_port_created(mlxsw_sp, i))
			mlxsw_sp_port_remove(mlxsw_sp, i);
	/* All event-enable iterations succeeded; reset i so the loop
	 * below disables events on every port.
	 */
	i = max_ports;
	mlxsw_sp_cpu_port_remove(mlxsw_sp);
err_cpu_port_create:
err_event_enable:
	for (i--; i >= 1; i--)
		mlxsw_sp_port_mapping_event_set(mlxsw_sp, i, false);
	/* Make sure all scheduled events are processed */
	__mlxsw_sp_port_mapping_events_cancel(mlxsw_sp);
	kfree(mlxsw_sp->ports);
	mlxsw_sp->ports = NULL;
	return err;
}
2167
2168 static int mlxsw_sp_port_module_info_init(struct mlxsw_sp *mlxsw_sp)
2169 {
2170         unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
2171         struct mlxsw_sp_port_mapping *port_mapping;
2172         int i;
2173         int err;
2174
2175         mlxsw_sp->port_mapping = kcalloc(max_ports,
2176                                          sizeof(struct mlxsw_sp_port_mapping),
2177                                          GFP_KERNEL);
2178         if (!mlxsw_sp->port_mapping)
2179                 return -ENOMEM;
2180
2181         for (i = 1; i < max_ports; i++) {
2182                 port_mapping = &mlxsw_sp->port_mapping[i];
2183                 err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, port_mapping);
2184                 if (err)
2185                         goto err_port_module_info_get;
2186         }
2187         return 0;
2188
2189 err_port_module_info_get:
2190         kfree(mlxsw_sp->port_mapping);
2191         return err;
2192 }
2193
/* Free the per-port mapping array allocated by
 * mlxsw_sp_port_module_info_init().
 */
static void mlxsw_sp_port_module_info_fini(struct mlxsw_sp *mlxsw_sp)
{
	kfree(mlxsw_sp->port_mapping);
}
2198
2199 static int
2200 mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp,
2201                            struct mlxsw_sp_port_mapping *port_mapping,
2202                            unsigned int count, const char *pmtdb_pl)
2203 {
2204         struct mlxsw_sp_port_mapping split_port_mapping;
2205         int err, i;
2206
2207         split_port_mapping = *port_mapping;
2208         split_port_mapping.width /= count;
2209         for (i = 0; i < count; i++) {
2210                 u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);
2211
2212                 if (!mlxsw_sp_local_port_valid(s_local_port))
2213                         continue;
2214
2215                 err = mlxsw_sp_port_create(mlxsw_sp, s_local_port,
2216                                            true, &split_port_mapping);
2217                 if (err)
2218                         goto err_port_create;
2219                 split_port_mapping.lane += split_port_mapping.width;
2220         }
2221
2222         return 0;
2223
2224 err_port_create:
2225         for (i--; i >= 0; i--) {
2226                 u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);
2227
2228                 if (mlxsw_sp_port_created(mlxsw_sp, s_local_port))
2229                         mlxsw_sp_port_remove(mlxsw_sp, s_local_port);
2230         }
2231         return err;
2232 }
2233
2234 static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
2235                                          unsigned int count,
2236                                          const char *pmtdb_pl)
2237 {
2238         struct mlxsw_sp_port_mapping *port_mapping;
2239         int i;
2240
2241         /* Go over original unsplit ports in the gap and recreate them. */
2242         for (i = 0; i < count; i++) {
2243                 u16 local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);
2244
2245                 port_mapping = &mlxsw_sp->port_mapping[local_port];
2246                 if (!port_mapping->width || !mlxsw_sp_local_port_valid(local_port))
2247                         continue;
2248                 mlxsw_sp_port_create(mlxsw_sp, local_port,
2249                                      false, port_mapping);
2250         }
2251 }
2252
2253 static struct mlxsw_sp_port *
2254 mlxsw_sp_port_get_by_local_port(struct mlxsw_sp *mlxsw_sp, u16 local_port)
2255 {
2256         if (mlxsw_sp->ports && mlxsw_sp->ports[local_port])
2257                 return mlxsw_sp->ports[local_port];
2258         return NULL;
2259 }
2260
/* devlink port split operation: split @local_port into @count ports.
 *
 * Validates the request, queries the PMTDB register for the set of
 * local ports affected by the split, removes the existing ports in that
 * set and re-creates them as split ports. On failure the original
 * unsplit ports are restored.
 */
static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u16 local_port,
			       unsigned int count,
			       struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port_mapping port_mapping;
	struct mlxsw_sp_port *mlxsw_sp_port;
	enum mlxsw_reg_pmtdb_status status;
	char pmtdb_pl[MLXSW_REG_PMTDB_LEN];
	int i;
	int err;

	mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port);
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		NL_SET_ERR_MSG_MOD(extack, "Port number does not exist");
		return -EINVAL;
	}

	/* A split port cannot be split again. */
	if (mlxsw_sp_port->split) {
		NL_SET_ERR_MSG_MOD(extack, "Port is already split");
		return -EINVAL;
	}

	mlxsw_reg_pmtdb_pack(pmtdb_pl, mlxsw_sp_port->mapping.slot_index,
			     mlxsw_sp_port->mapping.module,
			     mlxsw_sp_port->mapping.module_width / count,
			     count);
	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtdb), pmtdb_pl);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to query split info");
		return err;
	}

	status = mlxsw_reg_pmtdb_status_get(pmtdb_pl);
	if (status != MLXSW_REG_PMTDB_STATUS_SUCCESS) {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported split configuration");
		return -EINVAL;
	}

	/* Copy the mapping before the port (and its mapping) is removed. */
	port_mapping = mlxsw_sp_port->mapping;

	for (i = 0; i < count; i++) {
		u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);

		if (mlxsw_sp_port_created(mlxsw_sp, s_local_port))
			mlxsw_sp_port_remove(mlxsw_sp, s_local_port);
	}

	err = mlxsw_sp_port_split_create(mlxsw_sp, &port_mapping,
					 count, pmtdb_pl);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
		goto err_port_split_create;
	}

	return 0;

err_port_split_create:
	/* Restore the original unsplit ports that were removed above. */
	mlxsw_sp_port_unsplit_create(mlxsw_sp, count, pmtdb_pl);

	return err;
}
2325
/* devlink port unsplit operation: undo a previous split of @local_port.
 *
 * Derives the split count from the mapping widths, queries the PMTDB
 * register for the set of split ports, removes them, and re-creates the
 * original unsplit ports from the cached mapping.
 */
static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u16 local_port,
				 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port *mlxsw_sp_port;
	char pmtdb_pl[MLXSW_REG_PMTDB_LEN];
	unsigned int count;
	int i;
	int err;

	mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port);
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		NL_SET_ERR_MSG_MOD(extack, "Port number does not exist");
		return -EINVAL;
	}

	if (!mlxsw_sp_port->split) {
		NL_SET_ERR_MSG_MOD(extack, "Port was not split");
		return -EINVAL;
	}

	/* Number of split ports sharing the module. */
	count = mlxsw_sp_port->mapping.module_width /
		mlxsw_sp_port->mapping.width;

	mlxsw_reg_pmtdb_pack(pmtdb_pl, mlxsw_sp_port->mapping.slot_index,
			     mlxsw_sp_port->mapping.module,
			     mlxsw_sp_port->mapping.module_width / count,
			     count);
	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtdb), pmtdb_pl);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to query split info");
		return err;
	}

	for (i = 0; i < count; i++) {
		u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);

		if (mlxsw_sp_port_created(mlxsw_sp, s_local_port))
			mlxsw_sp_port_remove(mlxsw_sp, s_local_port);
	}

	mlxsw_sp_port_unsplit_create(mlxsw_sp, count, pmtdb_pl);

	return 0;
}
2373
2374 static void
2375 mlxsw_sp_port_down_wipe_counters(struct mlxsw_sp_port *mlxsw_sp_port)
2376 {
2377         int i;
2378
2379         for (i = 0; i < TC_MAX_QUEUE; i++)
2380                 mlxsw_sp_port->periodic_hw_stats.xstats.backlog[i] = 0;
2381 }
2382
/* PUDE (port up/down event) handler: propagate the operational status
 * reported by the device to the netdev carrier state.
 */
static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
				     char *pude_pl, void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port;
	enum mlxsw_reg_pude_oper_status status;
	u16 local_port;

	local_port = mlxsw_reg_pude_local_port_get(pude_pl);

	/* Guard against a malformed event carrying an out-of-range port. */
	if (WARN_ON_ONCE(!mlxsw_sp_local_port_is_valid(mlxsw_sp, local_port)))
		return;
	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port)
		return;

	status = mlxsw_reg_pude_oper_status_get(pude_pl);
	if (status == MLXSW_PORT_OPER_STATUS_UP) {
		netdev_info(mlxsw_sp_port->dev, "link up\n");
		netif_carrier_on(mlxsw_sp_port->dev);
		/* Re-apply the PTP shaper asynchronously on link up. */
		mlxsw_core_schedule_dw(&mlxsw_sp_port->ptp.shaper_dw, 0);
	} else {
		netdev_info(mlxsw_sp_port->dev, "link down\n");
		netif_carrier_off(mlxsw_sp_port->dev);
		mlxsw_sp_port_down_wipe_counters(mlxsw_sp_port);
	}
}
2410
/* Common handler for Spectrum-1 PTP timestamp FIFO events: unpack every
 * record from the MTPPTR payload and hand it to the PTP code, which
 * matches timestamps with the corresponding queued skbs.
 * @ingress distinguishes the RX FIFO from the TX FIFO.
 */
static void mlxsw_sp1_ptp_fifo_event_func(struct mlxsw_sp *mlxsw_sp,
					  char *mtpptr_pl, bool ingress)
{
	u16 local_port;
	u8 num_rec;
	int i;

	local_port = mlxsw_reg_mtpptr_local_port_get(mtpptr_pl);
	num_rec = mlxsw_reg_mtpptr_num_rec_get(mtpptr_pl);
	for (i = 0; i < num_rec; i++) {
		u8 domain_number;
		u8 message_type;
		u16 sequence_id;
		u64 timestamp;

		mlxsw_reg_mtpptr_unpack(mtpptr_pl, i, &message_type,
					&domain_number, &sequence_id,
					&timestamp);
		mlxsw_sp1_ptp_got_timestamp(mlxsw_sp, ingress, local_port,
					    message_type, domain_number,
					    sequence_id, timestamp);
	}
}
2434
2435 static void mlxsw_sp1_ptp_ing_fifo_event_func(const struct mlxsw_reg_info *reg,
2436                                               char *mtpptr_pl, void *priv)
2437 {
2438         struct mlxsw_sp *mlxsw_sp = priv;
2439
2440         mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, true);
2441 }
2442
2443 static void mlxsw_sp1_ptp_egr_fifo_event_func(const struct mlxsw_reg_info *reg,
2444                                               char *mtpptr_pl, void *priv)
2445 {
2446         struct mlxsw_sp *mlxsw_sp = priv;
2447
2448         mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, false);
2449 }
2450
/* Deliver a trapped packet to the stack via the port's netdev, updating
 * the per-CPU RX counters. Drops (frees nothing here; just returns) if
 * the port no longer exists.
 */
void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb,
				       u16 local_port, void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;

	if (unlikely(!mlxsw_sp_port)) {
		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
				     local_port);
		return;
	}

	skb->dev = mlxsw_sp_port->dev;

	/* u64_stats syncp guards 64-bit counter updates on 32-bit hosts. */
	pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
	u64_stats_update_begin(&pcpu_stats->syncp);
	pcpu_stats->rx_packets++;
	pcpu_stats->rx_bytes += skb->len;
	u64_stats_update_end(&pcpu_stats->syncp);

	skb->protocol = eth_type_trans(skb, skb->dev);
	netif_receive_skb(skb);
}
2475
2476 static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u16 local_port,
2477                                            void *priv)
2478 {
2479         skb->offload_fwd_mark = 1;
2480         return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
2481 }
2482
2483 static void mlxsw_sp_rx_listener_l3_mark_func(struct sk_buff *skb,
2484                                               u16 local_port, void *priv)
2485 {
2486         skb->offload_l3_fwd_mark = 1;
2487         skb->offload_fwd_mark = 1;
2488         return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
2489 }
2490
2491 void mlxsw_sp_ptp_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
2492                           u16 local_port)
2493 {
2494         mlxsw_sp->ptp_ops->receive(mlxsw_sp, skb, local_port);
2495 }
2496
/* Listener convenience macros. Trap groups are referenced with the SP_
 * prefix; the mirror action on delivery failure is DISCARD.
 */
#define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl)  \
	MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action, \
		  _is_ctrl, SP_##_trap_group, DISCARD)

/* As above, but the listener sets skb->offload_fwd_mark. */
#define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl)     \
	MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action,    \
		_is_ctrl, SP_##_trap_group, DISCARD)

/* As above, but the listener also sets skb->offload_l3_fwd_mark. */
#define MLXSW_SP_RXL_L3_MARK(_trap_id, _action, _trap_group, _is_ctrl)  \
	MLXSW_RXL(mlxsw_sp_rx_listener_l3_mark_func, _trap_id, _action, \
		_is_ctrl, SP_##_trap_group, DISCARD)

/* Event listeners always use the SP_EVENT trap group. */
#define MLXSW_SP_EVENTL(_func, _trap_id)                \
	MLXSW_EVENTL(_func, _trap_id, SP_EVENT)
2511
/* Traps and events common to all Spectrum generations. */
static const struct mlxsw_listener mlxsw_sp_listener[] = {
	/* Events */
	MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func, PUDE),
	/* L2 traps */
	MLXSW_SP_RXL_NO_MARK(FID_MISS, TRAP_TO_CPU, FID_MISS, false),
	/* L3 traps */
	MLXSW_SP_RXL_MARK(IPV6_UNSPECIFIED_ADDRESS, TRAP_TO_CPU, ROUTER_EXP,
			  false),
	MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_SRC, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV6_MC_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP,
			  false),
	MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_CLASS_E, FORWARD,
			     ROUTER_EXP, false),
	MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_MC_DMAC, FORWARD,
			     ROUTER_EXP, false),
	MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_DIP, FORWARD,
			     ROUTER_EXP, false),
	MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_DIP_LINK_LOCAL, FORWARD,
			     ROUTER_EXP, false),
	/* Multicast Router Traps */
	MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false),
	MLXSW_SP_RXL_L3_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false),
	/* NVE traps */
	MLXSW_SP_RXL_MARK(NVE_ENCAP_ARP, TRAP_TO_CPU, NEIGH_DISCOVERY, false),
};
2537
/* Spectrum-1 only: PTP timestamps are read out of dedicated FIFOs. */
static const struct mlxsw_listener mlxsw_sp1_listener[] = {
	/* Events */
	MLXSW_EVENTL(mlxsw_sp1_ptp_egr_fifo_event_func, PTP_EGR_FIFO, SP_PTP0),
	MLXSW_EVENTL(mlxsw_sp1_ptp_ing_fifo_event_func, PTP_ING_FIFO, SP_PTP0),
};
2543
/* Spectrum-2 and later: port mapping change events (PMLPE). */
static const struct mlxsw_listener mlxsw_sp2_listener[] = {
	/* Events */
	MLXSW_SP_EVENTL(mlxsw_sp_port_mapping_listener_func, PMLPE),
};
2548
/* Configure the CPU policers (QPCR register) that rate-limit traffic
 * trapped to the CPU. Only the trap groups listed in the switch get a
 * policer; the rest are skipped. Returns 0 or a negative errno.
 */
static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	char qpcr_pl[MLXSW_REG_QPCR_LEN];
	enum mlxsw_reg_qpcr_ir_units ir_units;
	int max_cpu_policers;
	bool is_bytes;
	u8 burst_size;
	u32 rate;
	int i, err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_CPU_POLICERS))
		return -EIO;

	max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);

	ir_units = MLXSW_REG_QPCR_IR_UNITS_M;
	for (i = 0; i < max_cpu_policers; i++) {
		is_bytes = false;
		switch (i) {
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_FID_MISS:
			/* NOTE(review): rate/burst units depend on ir_units
			 * and is_bytes as defined by the QPCR register --
			 * see reg.h for the exact semantics.
			 */
			rate = 1024;
			burst_size = 7;
			break;
		default:
			continue;
		}

		/* Record which policer indices are in use so the trap code
		 * can allocate from the remaining ones.
		 */
		__set_bit(i, mlxsw_sp->trap->policers_usage);
		mlxsw_reg_qpcr_pack(qpcr_pl, i, ir_units, is_bytes, rate,
				    burst_size);
		err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl);
		if (err)
			return err;
	}

	return 0;
}
2589
/* Bind each used trap group (HTGT register) to a priority, traffic class
 * and policer. Groups use their own index as policer id, except the event
 * group which is not policed. Returns 0 or a negative errno.
 */
static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];
	enum mlxsw_reg_htgt_trap_group i;
	int max_cpu_policers;
	int max_trap_groups;
	u8 priority, tc;
	u16 policer_id;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_TRAP_GROUPS))
		return -EIO;

	max_trap_groups = MLXSW_CORE_RES_GET(mlxsw_core, MAX_TRAP_GROUPS);
	max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);

	for (i = 0; i < max_trap_groups; i++) {
		/* By convention the policer index equals the group index
		 * (matching mlxsw_sp_cpu_policers_set()).
		 */
		policer_id = i;
		switch (i) {
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_FID_MISS:
			priority = 1;
			tc = 1;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT:
			priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY;
			tc = MLXSW_REG_HTGT_DEFAULT_TC;
			policer_id = MLXSW_REG_HTGT_INVALID_POLICER;
			break;
		default:
			continue;
		}

		/* A real policer id must fit within the device's range. */
		if (max_cpu_policers <= policer_id &&
		    policer_id != MLXSW_REG_HTGT_INVALID_POLICER)
			return -EIO;

		mlxsw_reg_htgt_pack(htgt_pl, i, policer_id, priority, tc);
		err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
		if (err)
			return err;
	}

	return 0;
}
2636
/* Set up trap infrastructure: allocate the policer-usage bitmap, program
 * CPU policers and trap groups, then register the common and the
 * per-generation (mlxsw_sp->listeners) trap listeners.
 * On failure, unwinds in reverse order via gotos. Returns 0 or -errno.
 */
static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_trap *trap;
	u64 max_policers;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_CPU_POLICERS))
		return -EIO;
	max_policers = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_CPU_POLICERS);
	/* struct_size() accounts for the trailing policers_usage bitmap. */
	trap = kzalloc(struct_size(trap, policers_usage,
				   BITS_TO_LONGS(max_policers)), GFP_KERNEL);
	if (!trap)
		return -ENOMEM;
	trap->max_policers = max_policers;
	mlxsw_sp->trap = trap;

	err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core);
	if (err)
		goto err_cpu_policers_set;

	err = mlxsw_sp_trap_groups_set(mlxsw_sp->core);
	if (err)
		goto err_trap_groups_set;

	err = mlxsw_core_traps_register(mlxsw_sp->core, mlxsw_sp_listener,
					ARRAY_SIZE(mlxsw_sp_listener),
					mlxsw_sp);
	if (err)
		goto err_traps_register;

	/* Per-ASIC-generation extra listeners (may be empty). */
	err = mlxsw_core_traps_register(mlxsw_sp->core, mlxsw_sp->listeners,
					mlxsw_sp->listeners_count, mlxsw_sp);
	if (err)
		goto err_extra_traps_init;

	return 0;

err_extra_traps_init:
	mlxsw_core_traps_unregister(mlxsw_sp->core, mlxsw_sp_listener,
				    ARRAY_SIZE(mlxsw_sp_listener),
				    mlxsw_sp);
err_traps_register:
err_trap_groups_set:
err_cpu_policers_set:
	kfree(trap);
	return err;
}
2684
/* Tear down trap infrastructure in the reverse order of
 * mlxsw_sp_traps_init().
 */
static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_core_traps_unregister(mlxsw_sp->core, mlxsw_sp->listeners,
				    mlxsw_sp->listeners_count,
				    mlxsw_sp);
	mlxsw_core_traps_unregister(mlxsw_sp->core, mlxsw_sp_listener,
				    ARRAY_SIZE(mlxsw_sp_listener), mlxsw_sp);
	kfree(mlxsw_sp->trap);
}
2694
/* Initial value fed into jhash() when deriving the LAG hash seed. */
#define MLXSW_SP_LAG_SEED_INIT 0xcafecafe

/* Configure LAG hashing (SLCR register) with a seed derived from the
 * switch's base MAC, and allocate the per-LAG upper-device tracking
 * array. Returns 0 or a negative errno.
 */
static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
{
	char slcr_pl[MLXSW_REG_SLCR_LEN];
	u16 max_lag;
	u32 seed;
	int err;

	/* Seed from the base MAC so different switches hash differently. */
	seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac),
		     MLXSW_SP_LAG_SEED_INIT);
	/* Hash over the full L2/L3/L4 header field set. */
	mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
				     MLXSW_REG_SLCR_LAG_HASH_DMAC |
				     MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
				     MLXSW_REG_SLCR_LAG_HASH_VLANID |
				     MLXSW_REG_SLCR_LAG_HASH_SIP |
				     MLXSW_REG_SLCR_LAG_HASH_DIP |
				     MLXSW_REG_SLCR_LAG_HASH_SPORT |
				     MLXSW_REG_SLCR_LAG_HASH_DPORT |
				     MLXSW_REG_SLCR_LAG_HASH_IPPROTO, seed);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
	if (err)
		return err;

	err = mlxsw_core_max_lag(mlxsw_sp->core, &max_lag);
	if (err)
		return err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS))
		return -EIO;

	mlxsw_sp->lags = kcalloc(max_lag, sizeof(struct mlxsw_sp_upper),
				 GFP_KERNEL);
	if (!mlxsw_sp->lags)
		return -ENOMEM;

	return 0;
}
2733
/* Release the per-LAG tracking array allocated by mlxsw_sp_lag_init(). */
static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp)
{
	kfree(mlxsw_sp->lags);
}
2738
/* PTP ops for Spectrum-1 ASICs. */
static const struct mlxsw_sp_ptp_ops mlxsw_sp1_ptp_ops = {
	.clock_init	= mlxsw_sp1_ptp_clock_init,
	.clock_fini	= mlxsw_sp1_ptp_clock_fini,
	.init		= mlxsw_sp1_ptp_init,
	.fini		= mlxsw_sp1_ptp_fini,
	.receive	= mlxsw_sp1_ptp_receive,
	.transmitted	= mlxsw_sp1_ptp_transmitted,
	.hwtstamp_get	= mlxsw_sp1_ptp_hwtstamp_get,
	.hwtstamp_set	= mlxsw_sp1_ptp_hwtstamp_set,
	.shaper_work	= mlxsw_sp1_ptp_shaper_work,
	.get_ts_info	= mlxsw_sp1_ptp_get_ts_info,
	.get_stats_count = mlxsw_sp1_get_stats_count,
	.get_stats_strings = mlxsw_sp1_get_stats_strings,
	.get_stats	= mlxsw_sp1_get_stats,
	.txhdr_construct = mlxsw_sp_ptp_txhdr_construct,
};
2755
/* PTP ops for Spectrum-2/3 ASICs. */
static const struct mlxsw_sp_ptp_ops mlxsw_sp2_ptp_ops = {
	.clock_init	= mlxsw_sp2_ptp_clock_init,
	.clock_fini	= mlxsw_sp2_ptp_clock_fini,
	.init		= mlxsw_sp2_ptp_init,
	.fini		= mlxsw_sp2_ptp_fini,
	.receive	= mlxsw_sp2_ptp_receive,
	.transmitted	= mlxsw_sp2_ptp_transmitted,
	.hwtstamp_get	= mlxsw_sp2_ptp_hwtstamp_get,
	.hwtstamp_set	= mlxsw_sp2_ptp_hwtstamp_set,
	.shaper_work	= mlxsw_sp2_ptp_shaper_work,
	.get_ts_info	= mlxsw_sp2_ptp_get_ts_info,
	.get_stats_count = mlxsw_sp2_get_stats_count,
	.get_stats_strings = mlxsw_sp2_get_stats_strings,
	.get_stats	= mlxsw_sp2_get_stats,
	.txhdr_construct = mlxsw_sp2_ptp_txhdr_construct,
};
2772
/* PTP ops for Spectrum-4: identical to the Spectrum-2 ops except for
 * txhdr_construct, which uses the generic helper.
 */
static const struct mlxsw_sp_ptp_ops mlxsw_sp4_ptp_ops = {
	.clock_init	= mlxsw_sp2_ptp_clock_init,
	.clock_fini	= mlxsw_sp2_ptp_clock_fini,
	.init		= mlxsw_sp2_ptp_init,
	.fini		= mlxsw_sp2_ptp_fini,
	.receive	= mlxsw_sp2_ptp_receive,
	.transmitted	= mlxsw_sp2_ptp_transmitted,
	.hwtstamp_get	= mlxsw_sp2_ptp_hwtstamp_get,
	.hwtstamp_set	= mlxsw_sp2_ptp_hwtstamp_set,
	.shaper_work	= mlxsw_sp2_ptp_shaper_work,
	.get_ts_info	= mlxsw_sp2_ptp_get_ts_info,
	.get_stats_count = mlxsw_sp2_get_stats_count,
	.get_stats_strings = mlxsw_sp2_get_stats_strings,
	.get_stats	= mlxsw_sp2_get_stats,
	.txhdr_construct = mlxsw_sp_ptp_txhdr_construct,
};
2789
/* A reference-counted packet-sampling trigger and its parameters,
 * keyed in sample_trigger_ht by the trigger itself.
 */
struct mlxsw_sp_sample_trigger_node {
	struct mlxsw_sp_sample_trigger trigger;		/* hash key */
	struct mlxsw_sp_sample_params params;
	struct rhash_head ht_node;
	struct rcu_head rcu;	/* for kfree_rcu(); lookups are RCU-side */
	refcount_t refcount;
};

static const struct rhashtable_params mlxsw_sp_sample_trigger_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_sample_trigger_node, trigger),
	.head_offset = offsetof(struct mlxsw_sp_sample_trigger_node, ht_node),
	.key_len = sizeof(struct mlxsw_sp_sample_trigger),
	.automatic_shrinking = true,
};
2804
/* Build a canonical hash key from @trigger. memset() (rather than member
 * initialization) is required: the struct is hashed over its full
 * key_len, so any padding bytes must be zeroed deterministically.
 */
static void
mlxsw_sp_sample_trigger_key_init(struct mlxsw_sp_sample_trigger *key,
				 const struct mlxsw_sp_sample_trigger *trigger)
{
	memset(key, 0, sizeof(*key));
	key->type = trigger->type;
	key->local_port = trigger->local_port;
}
2813
2814 /* RCU read lock must be held */
2815 struct mlxsw_sp_sample_params *
2816 mlxsw_sp_sample_trigger_params_lookup(struct mlxsw_sp *mlxsw_sp,
2817                                       const struct mlxsw_sp_sample_trigger *trigger)
2818 {
2819         struct mlxsw_sp_sample_trigger_node *trigger_node;
2820         struct mlxsw_sp_sample_trigger key;
2821
2822         mlxsw_sp_sample_trigger_key_init(&key, trigger);
2823         trigger_node = rhashtable_lookup(&mlxsw_sp->sample_trigger_ht, &key,
2824                                          mlxsw_sp_sample_trigger_ht_params);
2825         if (!trigger_node)
2826                 return NULL;
2827
2828         return &trigger_node->params;
2829 }
2830
2831 static int
2832 mlxsw_sp_sample_trigger_node_init(struct mlxsw_sp *mlxsw_sp,
2833                                   const struct mlxsw_sp_sample_trigger *trigger,
2834                                   const struct mlxsw_sp_sample_params *params)
2835 {
2836         struct mlxsw_sp_sample_trigger_node *trigger_node;
2837         int err;
2838
2839         trigger_node = kzalloc(sizeof(*trigger_node), GFP_KERNEL);
2840         if (!trigger_node)
2841                 return -ENOMEM;
2842
2843         trigger_node->trigger = *trigger;
2844         trigger_node->params = *params;
2845         refcount_set(&trigger_node->refcount, 1);
2846
2847         err = rhashtable_insert_fast(&mlxsw_sp->sample_trigger_ht,
2848                                      &trigger_node->ht_node,
2849                                      mlxsw_sp_sample_trigger_ht_params);
2850         if (err)
2851                 goto err_rhashtable_insert;
2852
2853         return 0;
2854
2855 err_rhashtable_insert:
2856         kfree(trigger_node);
2857         return err;
2858 }
2859
/* Remove a trigger node from the hash table and free it. kfree_rcu()
 * defers the free past a grace period, since lookups run under RCU.
 */
static void
mlxsw_sp_sample_trigger_node_fini(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_sample_trigger_node *trigger_node)
{
	rhashtable_remove_fast(&mlxsw_sp->sample_trigger_ht,
			       &trigger_node->ht_node,
			       mlxsw_sp_sample_trigger_ht_params);
	kfree_rcu(trigger_node, rcu);
}
2869
/* Register sampling parameters for @trigger. If an entry already exists
 * it may be shared (refcount bumped), but only when it is not a per-port
 * trigger and its parameters match exactly. Returns 0 or -errno.
 */
int
mlxsw_sp_sample_trigger_params_set(struct mlxsw_sp *mlxsw_sp,
				   const struct mlxsw_sp_sample_trigger *trigger,
				   const struct mlxsw_sp_sample_params *params,
				   struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_sample_trigger_node *trigger_node;
	struct mlxsw_sp_sample_trigger key;

	/* RTNL serializes writers against mlxsw_sp_sample_trigger_params_unset(). */
	ASSERT_RTNL();

	mlxsw_sp_sample_trigger_key_init(&key, trigger);

	trigger_node = rhashtable_lookup_fast(&mlxsw_sp->sample_trigger_ht,
					      &key,
					      mlxsw_sp_sample_trigger_ht_params);
	if (!trigger_node)
		return mlxsw_sp_sample_trigger_node_init(mlxsw_sp, &key,
							 params);

	/* A per-port trigger cannot be shared. */
	if (trigger_node->trigger.local_port) {
		NL_SET_ERR_MSG_MOD(extack, "Sampling already enabled on port");
		return -EINVAL;
	}

	/* Sharing requires identical sampling parameters. */
	if (trigger_node->params.psample_group != params->psample_group ||
	    trigger_node->params.truncate != params->truncate ||
	    trigger_node->params.rate != params->rate ||
	    trigger_node->params.trunc_size != params->trunc_size) {
		NL_SET_ERR_MSG_MOD(extack, "Sampling parameters do not match for an existing sampling trigger");
		return -EINVAL;
	}

	refcount_inc(&trigger_node->refcount);

	return 0;
}
2907
/* Drop one reference on the sampling parameters registered for @trigger;
 * the entry is destroyed when the last reference goes away.
 */
void
mlxsw_sp_sample_trigger_params_unset(struct mlxsw_sp *mlxsw_sp,
				     const struct mlxsw_sp_sample_trigger *trigger)
{
	struct mlxsw_sp_sample_trigger_node *trigger_node;
	struct mlxsw_sp_sample_trigger key;

	ASSERT_RTNL();

	mlxsw_sp_sample_trigger_key_init(&key, trigger);

	trigger_node = rhashtable_lookup_fast(&mlxsw_sp->sample_trigger_ht,
					      &key,
					      mlxsw_sp_sample_trigger_ht_params);
	if (!trigger_node)
		return;

	if (!refcount_dec_and_test(&trigger_node->refcount))
		return;

	mlxsw_sp_sample_trigger_node_fini(mlxsw_sp, trigger_node);
}
2930
2931 static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
2932                                     unsigned long event, void *ptr);
2933
/* Packet parsing depth (bytes) and VXLAN UDP destination port defaults. */
#define MLXSW_SP_DEFAULT_PARSING_DEPTH 96
#define MLXSW_SP_INCREASED_PARSING_DEPTH 128
#define MLXSW_SP_DEFAULT_VXLAN_UDP_DPORT 4789

/* Initialize the cached parsing configuration and its protecting lock. */
static void mlxsw_sp_parsing_init(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp->parsing.parsing_depth = MLXSW_SP_DEFAULT_PARSING_DEPTH;
	mlxsw_sp->parsing.vxlan_udp_dport = MLXSW_SP_DEFAULT_VXLAN_UDP_DPORT;
	mutex_init(&mlxsw_sp->parsing.lock);
}
2944
/* Counterpart of mlxsw_sp_parsing_init(). */
static void mlxsw_sp_parsing_fini(struct mlxsw_sp *mlxsw_sp)
{
	mutex_destroy(&mlxsw_sp->parsing.lock);
}
2949
/* A reference-counted IPv6 address entry mapping the address to its
 * KVDL index in the device, keyed in ipv6_addr_ht by the address.
 */
struct mlxsw_sp_ipv6_addr_node {
	struct in6_addr key;	/* hash key */
	struct rhash_head ht_node;
	u32 kvdl_index;
	refcount_t refcount;
};

static const struct rhashtable_params mlxsw_sp_ipv6_addr_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_ipv6_addr_node, key),
	.head_offset = offsetof(struct mlxsw_sp_ipv6_addr_node, ht_node),
	.key_len = sizeof(struct in6_addr),
	.automatic_shrinking = true,
};
2963
/* Allocate a KVDL entry for @addr6, program the address into the device
 * (RIPS register) and track it in the hash table with refcount 1.
 * On success *p_kvdl_index holds the allocated index. Caller holds
 * ipv6_addr_ht_lock. Returns 0 or a negative errno.
 */
static int
mlxsw_sp_ipv6_addr_init(struct mlxsw_sp *mlxsw_sp, const struct in6_addr *addr6,
			u32 *p_kvdl_index)
{
	struct mlxsw_sp_ipv6_addr_node *node;
	char rips_pl[MLXSW_REG_RIPS_LEN];
	int err;

	err = mlxsw_sp_kvdl_alloc(mlxsw_sp,
				  MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1,
				  p_kvdl_index);
	if (err)
		return err;

	mlxsw_reg_rips_pack(rips_pl, *p_kvdl_index, addr6);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rips), rips_pl);
	if (err)
		goto err_rips_write;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node) {
		err = -ENOMEM;
		goto err_node_alloc;
	}

	node->key = *addr6;
	node->kvdl_index = *p_kvdl_index;
	refcount_set(&node->refcount, 1);

	err = rhashtable_insert_fast(&mlxsw_sp->ipv6_addr_ht,
				     &node->ht_node,
				     mlxsw_sp_ipv6_addr_ht_params);
	if (err)
		goto err_rhashtable_insert;

	return 0;

err_rhashtable_insert:
	kfree(node);
err_node_alloc:
err_rips_write:
	/* All failure paths release the KVDL entry allocated above. */
	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1,
			   *p_kvdl_index);
	return err;
}
3009
/* Remove an IPv6 address entry and release its KVDL index. The index is
 * saved before kfree() so it is not read from freed memory.
 */
static void mlxsw_sp_ipv6_addr_fini(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_ipv6_addr_node *node)
{
	u32 kvdl_index = node->kvdl_index;

	rhashtable_remove_fast(&mlxsw_sp->ipv6_addr_ht, &node->ht_node,
			       mlxsw_sp_ipv6_addr_ht_params);
	kfree(node);
	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1,
			   kvdl_index);
}
3021
/* Get (or create) the KVDL index for @addr6, taking a reference on the
 * entry. Pair with mlxsw_sp_ipv6_addr_put(). Returns 0 or -errno.
 */
int mlxsw_sp_ipv6_addr_kvdl_index_get(struct mlxsw_sp *mlxsw_sp,
				      const struct in6_addr *addr6,
				      u32 *p_kvdl_index)
{
	struct mlxsw_sp_ipv6_addr_node *node;
	int err = 0;

	mutex_lock(&mlxsw_sp->ipv6_addr_ht_lock);
	node = rhashtable_lookup_fast(&mlxsw_sp->ipv6_addr_ht, addr6,
				      mlxsw_sp_ipv6_addr_ht_params);
	if (node) {
		/* Existing entry: share it. */
		refcount_inc(&node->refcount);
		*p_kvdl_index = node->kvdl_index;
		goto out_unlock;
	}

	err = mlxsw_sp_ipv6_addr_init(mlxsw_sp, addr6, p_kvdl_index);

out_unlock:
	mutex_unlock(&mlxsw_sp->ipv6_addr_ht_lock);
	return err;
}
3044
/* Release a reference on @addr6's entry taken by
 * mlxsw_sp_ipv6_addr_kvdl_index_get(); the entry (and its KVDL index)
 * is freed on the last put. WARNs if the address is not tracked.
 */
void
mlxsw_sp_ipv6_addr_put(struct mlxsw_sp *mlxsw_sp, const struct in6_addr *addr6)
{
	struct mlxsw_sp_ipv6_addr_node *node;

	mutex_lock(&mlxsw_sp->ipv6_addr_ht_lock);
	node = rhashtable_lookup_fast(&mlxsw_sp->ipv6_addr_ht, addr6,
				      mlxsw_sp_ipv6_addr_ht_params);
	if (WARN_ON(!node))
		goto out_unlock;

	if (!refcount_dec_and_test(&node->refcount))
		goto out_unlock;

	mlxsw_sp_ipv6_addr_fini(mlxsw_sp, node);

out_unlock:
	mutex_unlock(&mlxsw_sp->ipv6_addr_ht_lock);
}
3064
3065 static int mlxsw_sp_ipv6_addr_ht_init(struct mlxsw_sp *mlxsw_sp)
3066 {
3067         int err;
3068
3069         err = rhashtable_init(&mlxsw_sp->ipv6_addr_ht,
3070                               &mlxsw_sp_ipv6_addr_ht_params);
3071         if (err)
3072                 return err;
3073
3074         mutex_init(&mlxsw_sp->ipv6_addr_ht_lock);
3075         return 0;
3076 }
3077
/* Counterpart of mlxsw_sp_ipv6_addr_ht_init(), reverse order. */
static void mlxsw_sp_ipv6_addr_ht_fini(struct mlxsw_sp *mlxsw_sp)
{
	mutex_destroy(&mlxsw_sp->ipv6_addr_ht_lock);
	rhashtable_destroy(&mlxsw_sp->ipv6_addr_ht);
}
3083
3084 static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
3085                          const struct mlxsw_bus_info *mlxsw_bus_info,
3086                          struct netlink_ext_ack *extack)
3087 {
3088         struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
3089         int err;
3090
3091         mlxsw_sp->core = mlxsw_core;
3092         mlxsw_sp->bus_info = mlxsw_bus_info;
3093
3094         mlxsw_sp_parsing_init(mlxsw_sp);
3095
3096         err = mlxsw_sp_base_mac_get(mlxsw_sp);
3097         if (err) {
3098                 dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
3099                 return err;
3100         }
3101
3102         err = mlxsw_sp_kvdl_init(mlxsw_sp);
3103         if (err) {
3104                 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize KVDL\n");
3105                 return err;
3106         }
3107
3108         err = mlxsw_sp_pgt_init(mlxsw_sp);
3109         if (err) {
3110                 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize PGT\n");
3111                 goto err_pgt_init;
3112         }
3113
3114         err = mlxsw_sp_fids_init(mlxsw_sp);
3115         if (err) {
3116                 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize FIDs\n");
3117                 goto err_fids_init;
3118         }
3119
3120         err = mlxsw_sp_policers_init(mlxsw_sp);
3121         if (err) {
3122                 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize policers\n");
3123                 goto err_policers_init;
3124         }
3125
3126         err = mlxsw_sp_traps_init(mlxsw_sp);
3127         if (err) {
3128                 dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n");
3129                 goto err_traps_init;
3130         }
3131
3132         err = mlxsw_sp_devlink_traps_init(mlxsw_sp);
3133         if (err) {
3134                 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize devlink traps\n");
3135                 goto err_devlink_traps_init;
3136         }
3137
3138         err = mlxsw_sp_buffers_init(mlxsw_sp);
3139         if (err) {
3140                 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
3141                 goto err_buffers_init;
3142         }
3143
3144         err = mlxsw_sp_lag_init(mlxsw_sp);
3145         if (err) {
3146                 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
3147                 goto err_lag_init;
3148         }
3149
3150         /* Initialize SPAN before router and switchdev, so that those components
3151          * can call mlxsw_sp_span_respin().
3152          */
3153         err = mlxsw_sp_span_init(mlxsw_sp);
3154         if (err) {
3155                 dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n");
3156                 goto err_span_init;
3157         }
3158
3159         err = mlxsw_sp_switchdev_init(mlxsw_sp);
3160         if (err) {
3161                 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
3162                 goto err_switchdev_init;
3163         }
3164
3165         err = mlxsw_sp_counter_pool_init(mlxsw_sp);
3166         if (err) {
3167                 dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n");
3168                 goto err_counter_pool_init;
3169         }
3170
3171         err = mlxsw_sp_afa_init(mlxsw_sp);
3172         if (err) {
3173                 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL actions\n");
3174                 goto err_afa_init;
3175         }
3176
3177         err = mlxsw_sp_ipv6_addr_ht_init(mlxsw_sp);
3178         if (err) {
3179                 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize hash table for IPv6 addresses\n");
3180                 goto err_ipv6_addr_ht_init;
3181         }
3182
3183         err = mlxsw_sp_nve_init(mlxsw_sp);
3184         if (err) {
3185                 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize NVE\n");
3186                 goto err_nve_init;
3187         }
3188
3189         err = mlxsw_sp_acl_init(mlxsw_sp);
3190         if (err) {
3191                 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n");
3192                 goto err_acl_init;
3193         }
3194
3195         err = mlxsw_sp_router_init(mlxsw_sp, extack);
3196         if (err) {
3197                 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
3198                 goto err_router_init;
3199         }
3200
3201         if (mlxsw_sp->bus_info->read_clock_capable) {
3202                 /* NULL is a valid return value from clock_init */
3203                 mlxsw_sp->clock =
3204                         mlxsw_sp->ptp_ops->clock_init(mlxsw_sp,
3205                                                       mlxsw_sp->bus_info->dev);
3206                 if (IS_ERR(mlxsw_sp->clock)) {
3207                         err = PTR_ERR(mlxsw_sp->clock);
3208                         dev_err(mlxsw_sp->bus_info->dev, "Failed to init ptp clock\n");
3209                         goto err_ptp_clock_init;
3210                 }
3211         }
3212
3213         if (mlxsw_sp->clock) {
3214                 /* NULL is a valid return value from ptp_ops->init */
3215                 mlxsw_sp->ptp_state = mlxsw_sp->ptp_ops->init(mlxsw_sp);
3216                 if (IS_ERR(mlxsw_sp->ptp_state)) {
3217                         err = PTR_ERR(mlxsw_sp->ptp_state);
3218                         dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize PTP\n");
3219                         goto err_ptp_init;
3220                 }
3221         }
3222
3223         /* Initialize netdevice notifier after SPAN is initialized, so that the
3224          * event handler can call SPAN respin.
3225          */
3226         mlxsw_sp->netdevice_nb.notifier_call = mlxsw_sp_netdevice_event;
3227         err = register_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
3228                                               &mlxsw_sp->netdevice_nb);
3229         if (err) {
3230                 dev_err(mlxsw_sp->bus_info->dev, "Failed to register netdev notifier\n");
3231                 goto err_netdev_notifier;
3232         }
3233
3234         err = mlxsw_sp_dpipe_init(mlxsw_sp);
3235         if (err) {
3236                 dev_err(mlxsw_sp->bus_info->dev, "Failed to init pipeline debug\n");
3237                 goto err_dpipe_init;
3238         }
3239
3240         err = mlxsw_sp_port_module_info_init(mlxsw_sp);
3241         if (err) {
3242                 dev_err(mlxsw_sp->bus_info->dev, "Failed to init port module info\n");
3243                 goto err_port_module_info_init;
3244         }
3245
3246         err = rhashtable_init(&mlxsw_sp->sample_trigger_ht,
3247                               &mlxsw_sp_sample_trigger_ht_params);
3248         if (err) {
3249                 dev_err(mlxsw_sp->bus_info->dev, "Failed to init sampling trigger hashtable\n");
3250                 goto err_sample_trigger_init;
3251         }
3252
3253         err = mlxsw_sp_ports_create(mlxsw_sp);
3254         if (err) {
3255                 dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
3256                 goto err_ports_create;
3257         }
3258
3259         return 0;
3260
3261 err_ports_create:
3262         rhashtable_destroy(&mlxsw_sp->sample_trigger_ht);
3263 err_sample_trigger_init:
3264         mlxsw_sp_port_module_info_fini(mlxsw_sp);
3265 err_port_module_info_init:
3266         mlxsw_sp_dpipe_fini(mlxsw_sp);
3267 err_dpipe_init:
3268         unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
3269                                           &mlxsw_sp->netdevice_nb);
3270 err_netdev_notifier:
3271         if (mlxsw_sp->clock)
3272                 mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state);
3273 err_ptp_init:
3274         if (mlxsw_sp->clock)
3275                 mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock);
3276 err_ptp_clock_init:
3277         mlxsw_sp_router_fini(mlxsw_sp);
3278 err_router_init:
3279         mlxsw_sp_acl_fini(mlxsw_sp);
3280 err_acl_init:
3281         mlxsw_sp_nve_fini(mlxsw_sp);
3282 err_nve_init:
3283         mlxsw_sp_ipv6_addr_ht_fini(mlxsw_sp);
3284 err_ipv6_addr_ht_init:
3285         mlxsw_sp_afa_fini(mlxsw_sp);
3286 err_afa_init:
3287         mlxsw_sp_counter_pool_fini(mlxsw_sp);
3288 err_counter_pool_init:
3289         mlxsw_sp_switchdev_fini(mlxsw_sp);
3290 err_switchdev_init:
3291         mlxsw_sp_span_fini(mlxsw_sp);
3292 err_span_init:
3293         mlxsw_sp_lag_fini(mlxsw_sp);
3294 err_lag_init:
3295         mlxsw_sp_buffers_fini(mlxsw_sp);
3296 err_buffers_init:
3297         mlxsw_sp_devlink_traps_fini(mlxsw_sp);
3298 err_devlink_traps_init:
3299         mlxsw_sp_traps_fini(mlxsw_sp);
3300 err_traps_init:
3301         mlxsw_sp_policers_fini(mlxsw_sp);
3302 err_policers_init:
3303         mlxsw_sp_fids_fini(mlxsw_sp);
3304 err_fids_init:
3305         mlxsw_sp_pgt_fini(mlxsw_sp);
3306 err_pgt_init:
3307         mlxsw_sp_kvdl_fini(mlxsw_sp);
3308         mlxsw_sp_parsing_fini(mlxsw_sp);
3309         return err;
3310 }
3311
3312 static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core,
3313                           const struct mlxsw_bus_info *mlxsw_bus_info,
3314                           struct netlink_ext_ack *extack)
3315 {
3316         struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
3317
3318         mlxsw_sp->switchdev_ops = &mlxsw_sp1_switchdev_ops;
3319         mlxsw_sp->kvdl_ops = &mlxsw_sp1_kvdl_ops;
3320         mlxsw_sp->afa_ops = &mlxsw_sp1_act_afa_ops;
3321         mlxsw_sp->afk_ops = &mlxsw_sp1_afk_ops;
3322         mlxsw_sp->mr_tcam_ops = &mlxsw_sp1_mr_tcam_ops;
3323         mlxsw_sp->acl_rulei_ops = &mlxsw_sp1_acl_rulei_ops;
3324         mlxsw_sp->acl_tcam_ops = &mlxsw_sp1_acl_tcam_ops;
3325         mlxsw_sp->nve_ops_arr = mlxsw_sp1_nve_ops_arr;
3326         mlxsw_sp->mac_mask = mlxsw_sp1_mac_mask;
3327         mlxsw_sp->sb_vals = &mlxsw_sp1_sb_vals;
3328         mlxsw_sp->sb_ops = &mlxsw_sp1_sb_ops;
3329         mlxsw_sp->port_type_speed_ops = &mlxsw_sp1_port_type_speed_ops;
3330         mlxsw_sp->ptp_ops = &mlxsw_sp1_ptp_ops;
3331         mlxsw_sp->span_ops = &mlxsw_sp1_span_ops;
3332         mlxsw_sp->policer_core_ops = &mlxsw_sp1_policer_core_ops;
3333         mlxsw_sp->trap_ops = &mlxsw_sp1_trap_ops;
3334         mlxsw_sp->mall_ops = &mlxsw_sp1_mall_ops;
3335         mlxsw_sp->router_ops = &mlxsw_sp1_router_ops;
3336         mlxsw_sp->listeners = mlxsw_sp1_listener;
3337         mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp1_listener);
3338         mlxsw_sp->fid_family_arr = mlxsw_sp1_fid_family_arr;
3339         mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP1;
3340         mlxsw_sp->pgt_smpe_index_valid = true;
3341
3342         return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
3343 }
3344
3345 static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core,
3346                           const struct mlxsw_bus_info *mlxsw_bus_info,
3347                           struct netlink_ext_ack *extack)
3348 {
3349         struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
3350
3351         mlxsw_sp->switchdev_ops = &mlxsw_sp2_switchdev_ops;
3352         mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
3353         mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
3354         mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
3355         mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
3356         mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops;
3357         mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
3358         mlxsw_sp->acl_bf_ops = &mlxsw_sp2_acl_bf_ops;
3359         mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
3360         mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
3361         mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
3362         mlxsw_sp->sb_ops = &mlxsw_sp2_sb_ops;
3363         mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
3364         mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
3365         mlxsw_sp->span_ops = &mlxsw_sp2_span_ops;
3366         mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops;
3367         mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
3368         mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops;
3369         mlxsw_sp->router_ops = &mlxsw_sp2_router_ops;
3370         mlxsw_sp->listeners = mlxsw_sp2_listener;
3371         mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener);
3372         mlxsw_sp->fid_family_arr = mlxsw_sp2_fid_family_arr;
3373         mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP2;
3374         mlxsw_sp->pgt_smpe_index_valid = false;
3375
3376         return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
3377 }
3378
3379 static int mlxsw_sp3_init(struct mlxsw_core *mlxsw_core,
3380                           const struct mlxsw_bus_info *mlxsw_bus_info,
3381                           struct netlink_ext_ack *extack)
3382 {
3383         struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
3384
3385         mlxsw_sp->switchdev_ops = &mlxsw_sp2_switchdev_ops;
3386         mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
3387         mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
3388         mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
3389         mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
3390         mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops;
3391         mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
3392         mlxsw_sp->acl_bf_ops = &mlxsw_sp2_acl_bf_ops;
3393         mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
3394         mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
3395         mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
3396         mlxsw_sp->sb_ops = &mlxsw_sp3_sb_ops;
3397         mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
3398         mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
3399         mlxsw_sp->span_ops = &mlxsw_sp3_span_ops;
3400         mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops;
3401         mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
3402         mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops;
3403         mlxsw_sp->router_ops = &mlxsw_sp2_router_ops;
3404         mlxsw_sp->listeners = mlxsw_sp2_listener;
3405         mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener);
3406         mlxsw_sp->fid_family_arr = mlxsw_sp2_fid_family_arr;
3407         mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3;
3408         mlxsw_sp->pgt_smpe_index_valid = false;
3409
3410         return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
3411 }
3412
3413 static int mlxsw_sp4_init(struct mlxsw_core *mlxsw_core,
3414                           const struct mlxsw_bus_info *mlxsw_bus_info,
3415                           struct netlink_ext_ack *extack)
3416 {
3417         struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
3418
3419         mlxsw_sp->switchdev_ops = &mlxsw_sp2_switchdev_ops;
3420         mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
3421         mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
3422         mlxsw_sp->afk_ops = &mlxsw_sp4_afk_ops;
3423         mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
3424         mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops;
3425         mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
3426         mlxsw_sp->acl_bf_ops = &mlxsw_sp4_acl_bf_ops;
3427         mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
3428         mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
3429         mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
3430         mlxsw_sp->sb_ops = &mlxsw_sp3_sb_ops;
3431         mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
3432         mlxsw_sp->ptp_ops = &mlxsw_sp4_ptp_ops;
3433         mlxsw_sp->span_ops = &mlxsw_sp3_span_ops;
3434         mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops;
3435         mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
3436         mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops;
3437         mlxsw_sp->router_ops = &mlxsw_sp2_router_ops;
3438         mlxsw_sp->listeners = mlxsw_sp2_listener;
3439         mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener);
3440         mlxsw_sp->fid_family_arr = mlxsw_sp2_fid_family_arr;
3441         mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP4;
3442         mlxsw_sp->pgt_smpe_index_valid = false;
3443
3444         return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
3445 }
3446
/* Tear down a Spectrum instance. Sub-systems are finalized in the reverse
 * order of their initialization in mlxsw_sp_init().
 */
static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp_ports_remove(mlxsw_sp);
	rhashtable_destroy(&mlxsw_sp->sample_trigger_ht);
	mlxsw_sp_port_module_info_fini(mlxsw_sp);
	mlxsw_sp_dpipe_fini(mlxsw_sp);
	/* Unregister the netdevice notifier before SPAN is finalized, since
	 * its event handler may call SPAN respin (see mlxsw_sp_init()).
	 */
	unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
					  &mlxsw_sp->netdevice_nb);
	/* PTP state only exists when a clock was created during init. */
	if (mlxsw_sp->clock) {
		mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state);
		mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock);
	}
	mlxsw_sp_router_fini(mlxsw_sp);
	mlxsw_sp_acl_fini(mlxsw_sp);
	mlxsw_sp_nve_fini(mlxsw_sp);
	mlxsw_sp_ipv6_addr_ht_fini(mlxsw_sp);
	mlxsw_sp_afa_fini(mlxsw_sp);
	mlxsw_sp_counter_pool_fini(mlxsw_sp);
	mlxsw_sp_switchdev_fini(mlxsw_sp);
	mlxsw_sp_span_fini(mlxsw_sp);
	mlxsw_sp_lag_fini(mlxsw_sp);
	mlxsw_sp_buffers_fini(mlxsw_sp);
	mlxsw_sp_devlink_traps_fini(mlxsw_sp);
	mlxsw_sp_traps_fini(mlxsw_sp);
	mlxsw_sp_policers_fini(mlxsw_sp);
	mlxsw_sp_fids_fini(mlxsw_sp);
	mlxsw_sp_pgt_fini(mlxsw_sp);
	mlxsw_sp_kvdl_fini(mlxsw_sp);
	mlxsw_sp_parsing_fini(mlxsw_sp);
}
3479
/* Device configuration profile for Spectrum-1. Unlike later ASICs, KVD
 * sizes are configured here; the hash single:double parts ratio below is
 * consumed by mlxsw_sp_kvd_sizes_get() when the user did not override the
 * sizes via devlink resources.
 */
static const struct mlxsw_config_profile mlxsw_sp1_config_profile = {
	.used_flood_mode		= 1,
	.flood_mode			= MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_CONTROLLED,
	.used_max_ib_mc			= 1,
	.max_ib_mc			= 0,
	.used_max_pkey			= 1,
	.max_pkey			= 0,
	.used_ubridge			= 1,
	.ubridge			= 1,
	.used_kvd_sizes			= 1,
	.kvd_hash_single_parts		= 59,
	.kvd_hash_double_parts		= 41,
	.kvd_linear_size		= MLXSW_SP_KVD_LINEAR_SIZE,
	.swid_config			= {
		{
			.used_type	= 1,
			.type		= MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
};
3500
/* Device configuration profile for Spectrum-2/3. No KVD partitioning here
 * (compare mlxsw_sp2_resources_kvd_register(), which exposes the KVD as a
 * single resource); CQE time stamps are requested in UTC format.
 */
static const struct mlxsw_config_profile mlxsw_sp2_config_profile = {
	.used_flood_mode		= 1,
	.flood_mode			= MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_CONTROLLED,
	.used_max_ib_mc			= 1,
	.max_ib_mc			= 0,
	.used_max_pkey			= 1,
	.max_pkey			= 0,
	.used_ubridge			= 1,
	.ubridge			= 1,
	.swid_config			= {
		{
			.used_type	= 1,
			.type		= MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
	.used_cqe_time_stamp_type	= 1,
	.cqe_time_stamp_type		= MLXSW_CMD_MBOX_CONFIG_PROFILE_CQE_TIME_STAMP_TYPE_UTC,
};
3519
/* Reduce number of LAGs from full capacity (256) to the maximum supported LAGs
 * in Spectrum-2/3, to avoid regression in number of free entries in the PGT
 * table.
 */
#define MLXSW_SP4_CONFIG_PROFILE_MAX_LAG 128

/* Device configuration profile for Spectrum-4. Identical to the
 * Spectrum-2/3 profile except for the explicit LAG cap above.
 */
static const struct mlxsw_config_profile mlxsw_sp4_config_profile = {
	.used_max_lag			= 1,
	.max_lag			= MLXSW_SP4_CONFIG_PROFILE_MAX_LAG,
	.used_flood_mode		= 1,
	.flood_mode			= MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_CONTROLLED,
	.used_max_ib_mc			= 1,
	.max_ib_mc			= 0,
	.used_max_pkey			= 1,
	.max_pkey			= 0,
	.used_ubridge			= 1,
	.ubridge			= 1,
	.swid_config			= {
		{
			.used_type	= 1,
			.type		= MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
	.used_cqe_time_stamp_type	= 1,
	.cqe_time_stamp_type		= MLXSW_CMD_MBOX_CONFIG_PROFILE_CQE_TIME_STAMP_TYPE_UTC,
};
3546
3547 static void
3548 mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core,
3549                                       struct devlink_resource_size_params *kvd_size_params,
3550                                       struct devlink_resource_size_params *linear_size_params,
3551                                       struct devlink_resource_size_params *hash_double_size_params,
3552                                       struct devlink_resource_size_params *hash_single_size_params)
3553 {
3554         u32 single_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
3555                                                  KVD_SINGLE_MIN_SIZE);
3556         u32 double_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
3557                                                  KVD_DOUBLE_MIN_SIZE);
3558         u32 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
3559         u32 linear_size_min = 0;
3560
3561         devlink_resource_size_params_init(kvd_size_params, kvd_size, kvd_size,
3562                                           MLXSW_SP_KVD_GRANULARITY,
3563                                           DEVLINK_RESOURCE_UNIT_ENTRY);
3564         devlink_resource_size_params_init(linear_size_params, linear_size_min,
3565                                           kvd_size - single_size_min -
3566                                           double_size_min,
3567                                           MLXSW_SP_KVD_GRANULARITY,
3568                                           DEVLINK_RESOURCE_UNIT_ENTRY);
3569         devlink_resource_size_params_init(hash_double_size_params,
3570                                           double_size_min,
3571                                           kvd_size - single_size_min -
3572                                           linear_size_min,
3573                                           MLXSW_SP_KVD_GRANULARITY,
3574                                           DEVLINK_RESOURCE_UNIT_ENTRY);
3575         devlink_resource_size_params_init(hash_single_size_params,
3576                                           single_size_min,
3577                                           kvd_size - double_size_min -
3578                                           linear_size_min,
3579                                           MLXSW_SP_KVD_GRANULARITY,
3580                                           DEVLINK_RESOURCE_UNIT_ENTRY);
3581 }
3582
3583 static int mlxsw_sp1_resources_kvd_register(struct mlxsw_core *mlxsw_core)
3584 {
3585         struct devlink *devlink = priv_to_devlink(mlxsw_core);
3586         struct devlink_resource_size_params hash_single_size_params;
3587         struct devlink_resource_size_params hash_double_size_params;
3588         struct devlink_resource_size_params linear_size_params;
3589         struct devlink_resource_size_params kvd_size_params;
3590         u32 kvd_size, single_size, double_size, linear_size;
3591         const struct mlxsw_config_profile *profile;
3592         int err;
3593
3594         profile = &mlxsw_sp1_config_profile;
3595         if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
3596                 return -EIO;
3597
3598         mlxsw_sp_resource_size_params_prepare(mlxsw_core, &kvd_size_params,
3599                                               &linear_size_params,
3600                                               &hash_double_size_params,
3601                                               &hash_single_size_params);
3602
3603         kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
3604         err = devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
3605                                      kvd_size, MLXSW_SP_RESOURCE_KVD,
3606                                      DEVLINK_RESOURCE_ID_PARENT_TOP,
3607                                      &kvd_size_params);
3608         if (err)
3609                 return err;
3610
3611         linear_size = profile->kvd_linear_size;
3612         err = devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR,
3613                                      linear_size,
3614                                      MLXSW_SP_RESOURCE_KVD_LINEAR,
3615                                      MLXSW_SP_RESOURCE_KVD,
3616                                      &linear_size_params);
3617         if (err)
3618                 return err;
3619
3620         err = mlxsw_sp1_kvdl_resources_register(mlxsw_core);
3621         if  (err)
3622                 return err;
3623
3624         double_size = kvd_size - linear_size;
3625         double_size *= profile->kvd_hash_double_parts;
3626         double_size /= profile->kvd_hash_double_parts +
3627                        profile->kvd_hash_single_parts;
3628         double_size = rounddown(double_size, MLXSW_SP_KVD_GRANULARITY);
3629         err = devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE,
3630                                      double_size,
3631                                      MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
3632                                      MLXSW_SP_RESOURCE_KVD,
3633                                      &hash_double_size_params);
3634         if (err)
3635                 return err;
3636
3637         single_size = kvd_size - double_size - linear_size;
3638         err = devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE,
3639                                      single_size,
3640                                      MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
3641                                      MLXSW_SP_RESOURCE_KVD,
3642                                      &hash_single_size_params);
3643         if (err)
3644                 return err;
3645
3646         return 0;
3647 }
3648
3649 static int mlxsw_sp2_resources_kvd_register(struct mlxsw_core *mlxsw_core)
3650 {
3651         struct devlink *devlink = priv_to_devlink(mlxsw_core);
3652         struct devlink_resource_size_params kvd_size_params;
3653         u32 kvd_size;
3654
3655         if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
3656                 return -EIO;
3657
3658         kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
3659         devlink_resource_size_params_init(&kvd_size_params, kvd_size, kvd_size,
3660                                           MLXSW_SP_KVD_GRANULARITY,
3661                                           DEVLINK_RESOURCE_UNIT_ENTRY);
3662
3663         return devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
3664                                       kvd_size, MLXSW_SP_RESOURCE_KVD,
3665                                       DEVLINK_RESOURCE_ID_PARENT_TOP,
3666                                       &kvd_size_params);
3667 }
3668
3669 static int mlxsw_sp_resources_span_register(struct mlxsw_core *mlxsw_core)
3670 {
3671         struct devlink *devlink = priv_to_devlink(mlxsw_core);
3672         struct devlink_resource_size_params span_size_params;
3673         u32 max_span;
3674
3675         if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_SPAN))
3676                 return -EIO;
3677
3678         max_span = MLXSW_CORE_RES_GET(mlxsw_core, MAX_SPAN);
3679         devlink_resource_size_params_init(&span_size_params, max_span, max_span,
3680                                           1, DEVLINK_RESOURCE_UNIT_ENTRY);
3681
3682         return devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_SPAN,
3683                                       max_span, MLXSW_SP_RESOURCE_SPAN,
3684                                       DEVLINK_RESOURCE_ID_PARENT_TOP,
3685                                       &span_size_params);
3686 }
3687
3688 static int
3689 mlxsw_sp_resources_rif_mac_profile_register(struct mlxsw_core *mlxsw_core)
3690 {
3691         struct devlink *devlink = priv_to_devlink(mlxsw_core);
3692         struct devlink_resource_size_params size_params;
3693         u8 max_rif_mac_profiles;
3694
3695         if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_RIF_MAC_PROFILES))
3696                 max_rif_mac_profiles = 1;
3697         else
3698                 max_rif_mac_profiles = MLXSW_CORE_RES_GET(mlxsw_core,
3699                                                           MAX_RIF_MAC_PROFILES);
3700         devlink_resource_size_params_init(&size_params, max_rif_mac_profiles,
3701                                           max_rif_mac_profiles, 1,
3702                                           DEVLINK_RESOURCE_UNIT_ENTRY);
3703
3704         return devl_resource_register(devlink,
3705                                       "rif_mac_profiles",
3706                                       max_rif_mac_profiles,
3707                                       MLXSW_SP_RESOURCE_RIF_MAC_PROFILES,
3708                                       DEVLINK_RESOURCE_ID_PARENT_TOP,
3709                                       &size_params);
3710 }
3711
3712 static int mlxsw_sp_resources_rifs_register(struct mlxsw_core *mlxsw_core)
3713 {
3714         struct devlink *devlink = priv_to_devlink(mlxsw_core);
3715         struct devlink_resource_size_params size_params;
3716         u64 max_rifs;
3717
3718         if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_RIFS))
3719                 return -EIO;
3720
3721         max_rifs = MLXSW_CORE_RES_GET(mlxsw_core, MAX_RIFS);
3722         devlink_resource_size_params_init(&size_params, max_rifs, max_rifs,
3723                                           1, DEVLINK_RESOURCE_UNIT_ENTRY);
3724
3725         return devl_resource_register(devlink, "rifs", max_rifs,
3726                                       MLXSW_SP_RESOURCE_RIFS,
3727                                       DEVLINK_RESOURCE_ID_PARENT_TOP,
3728                                       &size_params);
3729 }
3730
/* Register all Spectrum-1 devlink resources. */
static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core)
{
	int err;

	err = mlxsw_sp1_resources_kvd_register(mlxsw_core);
	if (err)
		return err;

	err = mlxsw_sp_resources_span_register(mlxsw_core);
	if (err)
		goto err_unregister;

	err = mlxsw_sp_counter_resources_register(mlxsw_core);
	if (err)
		goto err_unregister;

	err = mlxsw_sp_policer_resources_register(mlxsw_core);
	if (err)
		goto err_unregister;

	err = mlxsw_sp_resources_rif_mac_profile_register(mlxsw_core);
	if (err)
		goto err_unregister;

	err = mlxsw_sp_resources_rifs_register(mlxsw_core);
	if (err)
		goto err_unregister;

	return 0;

err_unregister:
	/* devl_resources_unregister() drops everything registered so far,
	 * so one error label suffices for all failure points.
	 */
	devl_resources_unregister(priv_to_devlink(mlxsw_core));
	return err;
}
3769
/* Register all devlink resources for Spectrum-2 and later. */
static int mlxsw_sp2_resources_register(struct mlxsw_core *mlxsw_core)
{
	int err;

	err = mlxsw_sp2_resources_kvd_register(mlxsw_core);
	if (err)
		return err;

	err = mlxsw_sp_resources_span_register(mlxsw_core);
	if (err)
		goto err_unregister;

	err = mlxsw_sp_counter_resources_register(mlxsw_core);
	if (err)
		goto err_unregister;

	err = mlxsw_sp_policer_resources_register(mlxsw_core);
	if (err)
		goto err_unregister;

	err = mlxsw_sp_resources_rif_mac_profile_register(mlxsw_core);
	if (err)
		goto err_unregister;

	err = mlxsw_sp_resources_rifs_register(mlxsw_core);
	if (err)
		goto err_unregister;

	return 0;

err_unregister:
	/* devl_resources_unregister() drops everything registered so far,
	 * so one error label suffices for all failure points.
	 */
	devl_resources_unregister(priv_to_devlink(mlxsw_core));
	return err;
}
3808
/* Determine the sizes of the three KVD parts (linear, hash-single and
 * hash-double), preferring user-provided sizes from devlink resources and
 * falling back to @profile defaults and the hash parts ratio.
 *
 * Returns 0 on success, -EIO when required device resources are absent or
 * when the resulting sizes violate the device minimums.
 */
static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
				  const struct mlxsw_config_profile *profile,
				  u64 *p_single_size, u64 *p_double_size,
				  u64 *p_linear_size)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	u32 double_size;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
	    !MLXSW_CORE_RES_VALID(mlxsw_core, KVD_DOUBLE_MIN_SIZE))
		return -EIO;

	/* The hash part is what left of the kvd without the
	 * linear part. It is split to the single size and
	 * double size by the parts ratio from the profile.
	 * Both sizes must be a multiplications of the
	 * granularity from the profile. In case the user
	 * provided the sizes they are obtained via devlink.
	 */
	err = devl_resource_size_get(devlink,
				     MLXSW_SP_RESOURCE_KVD_LINEAR,
				     p_linear_size);
	if (err)
		*p_linear_size = profile->kvd_linear_size;

	err = devl_resource_size_get(devlink,
				     MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
				     p_double_size);
	if (err) {
		/* No user override; derive the double part from the
		 * double:single ratio, rounded down to the granularity.
		 */
		double_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
			      *p_linear_size;
		double_size *= profile->kvd_hash_double_parts;
		double_size /= profile->kvd_hash_double_parts +
			       profile->kvd_hash_single_parts;
		*p_double_size = rounddown(double_size,
					   MLXSW_SP_KVD_GRANULARITY);
	}

	err = devl_resource_size_get(devlink,
				     MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
				     p_single_size);
	if (err)
		/* Single part takes whatever is left of the KVD. */
		*p_single_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
				 *p_double_size - *p_linear_size;

	/* Check results are legal. */
	if (*p_single_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
	    *p_double_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE) ||
	    MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) < *p_linear_size)
		return -EIO;

	return 0;
}
3863
3864 static int
3865 mlxsw_sp_params_acl_region_rehash_intrvl_get(struct devlink *devlink, u32 id,
3866                                              struct devlink_param_gset_ctx *ctx)
3867 {
3868         struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
3869         struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
3870
3871         ctx->val.vu32 = mlxsw_sp_acl_region_rehash_intrvl_get(mlxsw_sp);
3872         return 0;
3873 }
3874
3875 static int
3876 mlxsw_sp_params_acl_region_rehash_intrvl_set(struct devlink *devlink, u32 id,
3877                                              struct devlink_param_gset_ctx *ctx)
3878 {
3879         struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
3880         struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
3881
3882         return mlxsw_sp_acl_region_rehash_intrvl_set(mlxsw_sp, ctx->val.vu32);
3883 }
3884
/* Runtime devlink parameters exposed on Spectrum-2 and later ASICs. */
static const struct devlink_param mlxsw_sp2_devlink_params[] = {
        DEVLINK_PARAM_DRIVER(MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL,
                             "acl_region_rehash_interval",
                             DEVLINK_PARAM_TYPE_U32,
                             BIT(DEVLINK_PARAM_CMODE_RUNTIME),
                             mlxsw_sp_params_acl_region_rehash_intrvl_get,
                             mlxsw_sp_params_acl_region_rehash_intrvl_set,
                             NULL),
};
3894
3895 static int mlxsw_sp2_params_register(struct mlxsw_core *mlxsw_core)
3896 {
3897         struct devlink *devlink = priv_to_devlink(mlxsw_core);
3898         union devlink_param_value value;
3899         int err;
3900
3901         err = devl_params_register(devlink, mlxsw_sp2_devlink_params,
3902                                    ARRAY_SIZE(mlxsw_sp2_devlink_params));
3903         if (err)
3904                 return err;
3905
3906         value.vu32 = 0;
3907         devl_param_driverinit_value_set(devlink,
3908                                         MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL,
3909                                         value);
3910         return 0;
3911 }
3912
3913 static void mlxsw_sp2_params_unregister(struct mlxsw_core *mlxsw_core)
3914 {
3915         devl_params_unregister(priv_to_devlink(mlxsw_core),
3916                                mlxsw_sp2_devlink_params,
3917                                ARRAY_SIZE(mlxsw_sp2_devlink_params));
3918 }
3919
/* Core callback invoked when a PTP packet has been transmitted; strips the
 * mlxsw Tx header and forwards the skb to the per-ASIC PTP handler.
 */
static void mlxsw_sp_ptp_transmitted(struct mlxsw_core *mlxsw_core,
                                     struct sk_buff *skb, u16 local_port)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

        skb_pull(skb, MLXSW_TXHDR_LEN);
        mlxsw_sp->ptp_ops->transmitted(mlxsw_sp, skb, local_port);
}
3928
/* Core driver ops for Spectrum-1. Unlike later generations it uses
 * KVD-based resources (kvd_sizes_get) and has no devlink params or
 * selective port removal; its SDQs do not support CQEv2.
 */
static struct mlxsw_driver mlxsw_sp1_driver = {
        .kind                           = mlxsw_sp1_driver_name,
        .priv_size                      = sizeof(struct mlxsw_sp),
        .fw_req_rev                     = &mlxsw_sp1_fw_rev,
        .fw_filename                    = MLXSW_SP1_FW_FILENAME,
        .init                           = mlxsw_sp1_init,
        .fini                           = mlxsw_sp_fini,
        .port_split                     = mlxsw_sp_port_split,
        .port_unsplit                   = mlxsw_sp_port_unsplit,
        .sb_pool_get                    = mlxsw_sp_sb_pool_get,
        .sb_pool_set                    = mlxsw_sp_sb_pool_set,
        .sb_port_pool_get               = mlxsw_sp_sb_port_pool_get,
        .sb_port_pool_set               = mlxsw_sp_sb_port_pool_set,
        .sb_tc_pool_bind_get            = mlxsw_sp_sb_tc_pool_bind_get,
        .sb_tc_pool_bind_set            = mlxsw_sp_sb_tc_pool_bind_set,
        .sb_occ_snapshot                = mlxsw_sp_sb_occ_snapshot,
        .sb_occ_max_clear               = mlxsw_sp_sb_occ_max_clear,
        .sb_occ_port_pool_get           = mlxsw_sp_sb_occ_port_pool_get,
        .sb_occ_tc_port_bind_get        = mlxsw_sp_sb_occ_tc_port_bind_get,
        .trap_init                      = mlxsw_sp_trap_init,
        .trap_fini                      = mlxsw_sp_trap_fini,
        .trap_action_set                = mlxsw_sp_trap_action_set,
        .trap_group_init                = mlxsw_sp_trap_group_init,
        .trap_group_set                 = mlxsw_sp_trap_group_set,
        .trap_policer_init              = mlxsw_sp_trap_policer_init,
        .trap_policer_fini              = mlxsw_sp_trap_policer_fini,
        .trap_policer_set               = mlxsw_sp_trap_policer_set,
        .trap_policer_counter_get       = mlxsw_sp_trap_policer_counter_get,
        .txhdr_construct                = mlxsw_sp_txhdr_construct,
        .resources_register             = mlxsw_sp1_resources_register,
        .kvd_sizes_get                  = mlxsw_sp_kvd_sizes_get,
        .ptp_transmitted                = mlxsw_sp_ptp_transmitted,
        .txhdr_len                      = MLXSW_TXHDR_LEN,
        .profile                        = &mlxsw_sp1_config_profile,
        .sdq_supports_cqe_v2            = false,
};
3965
/* Core driver ops for Spectrum-2: adds selective port removal, devlink
 * params and CQEv2-capable SDQs relative to Spectrum-1.
 */
static struct mlxsw_driver mlxsw_sp2_driver = {
        .kind                           = mlxsw_sp2_driver_name,
        .priv_size                      = sizeof(struct mlxsw_sp),
        .fw_req_rev                     = &mlxsw_sp2_fw_rev,
        .fw_filename                    = MLXSW_SP2_FW_FILENAME,
        .init                           = mlxsw_sp2_init,
        .fini                           = mlxsw_sp_fini,
        .port_split                     = mlxsw_sp_port_split,
        .port_unsplit                   = mlxsw_sp_port_unsplit,
        .ports_remove_selected          = mlxsw_sp_ports_remove_selected,
        .sb_pool_get                    = mlxsw_sp_sb_pool_get,
        .sb_pool_set                    = mlxsw_sp_sb_pool_set,
        .sb_port_pool_get               = mlxsw_sp_sb_port_pool_get,
        .sb_port_pool_set               = mlxsw_sp_sb_port_pool_set,
        .sb_tc_pool_bind_get            = mlxsw_sp_sb_tc_pool_bind_get,
        .sb_tc_pool_bind_set            = mlxsw_sp_sb_tc_pool_bind_set,
        .sb_occ_snapshot                = mlxsw_sp_sb_occ_snapshot,
        .sb_occ_max_clear               = mlxsw_sp_sb_occ_max_clear,
        .sb_occ_port_pool_get           = mlxsw_sp_sb_occ_port_pool_get,
        .sb_occ_tc_port_bind_get        = mlxsw_sp_sb_occ_tc_port_bind_get,
        .trap_init                      = mlxsw_sp_trap_init,
        .trap_fini                      = mlxsw_sp_trap_fini,
        .trap_action_set                = mlxsw_sp_trap_action_set,
        .trap_group_init                = mlxsw_sp_trap_group_init,
        .trap_group_set                 = mlxsw_sp_trap_group_set,
        .trap_policer_init              = mlxsw_sp_trap_policer_init,
        .trap_policer_fini              = mlxsw_sp_trap_policer_fini,
        .trap_policer_set               = mlxsw_sp_trap_policer_set,
        .trap_policer_counter_get       = mlxsw_sp_trap_policer_counter_get,
        .txhdr_construct                = mlxsw_sp_txhdr_construct,
        .resources_register             = mlxsw_sp2_resources_register,
        .params_register                = mlxsw_sp2_params_register,
        .params_unregister              = mlxsw_sp2_params_unregister,
        .ptp_transmitted                = mlxsw_sp_ptp_transmitted,
        .txhdr_len                      = MLXSW_TXHDR_LEN,
        .profile                        = &mlxsw_sp2_config_profile,
        .sdq_supports_cqe_v2            = true,
};
4004
/* Core driver ops for Spectrum-3. Identical to the Spectrum-2 ops except
 * for its own init callback, firmware revision/file and driver name; it
 * reuses the Spectrum-2 resources, params and config profile.
 */
static struct mlxsw_driver mlxsw_sp3_driver = {
        .kind                           = mlxsw_sp3_driver_name,
        .priv_size                      = sizeof(struct mlxsw_sp),
        .fw_req_rev                     = &mlxsw_sp3_fw_rev,
        .fw_filename                    = MLXSW_SP3_FW_FILENAME,
        .init                           = mlxsw_sp3_init,
        .fini                           = mlxsw_sp_fini,
        .port_split                     = mlxsw_sp_port_split,
        .port_unsplit                   = mlxsw_sp_port_unsplit,
        .ports_remove_selected          = mlxsw_sp_ports_remove_selected,
        .sb_pool_get                    = mlxsw_sp_sb_pool_get,
        .sb_pool_set                    = mlxsw_sp_sb_pool_set,
        .sb_port_pool_get               = mlxsw_sp_sb_port_pool_get,
        .sb_port_pool_set               = mlxsw_sp_sb_port_pool_set,
        .sb_tc_pool_bind_get            = mlxsw_sp_sb_tc_pool_bind_get,
        .sb_tc_pool_bind_set            = mlxsw_sp_sb_tc_pool_bind_set,
        .sb_occ_snapshot                = mlxsw_sp_sb_occ_snapshot,
        .sb_occ_max_clear               = mlxsw_sp_sb_occ_max_clear,
        .sb_occ_port_pool_get           = mlxsw_sp_sb_occ_port_pool_get,
        .sb_occ_tc_port_bind_get        = mlxsw_sp_sb_occ_tc_port_bind_get,
        .trap_init                      = mlxsw_sp_trap_init,
        .trap_fini                      = mlxsw_sp_trap_fini,
        .trap_action_set                = mlxsw_sp_trap_action_set,
        .trap_group_init                = mlxsw_sp_trap_group_init,
        .trap_group_set                 = mlxsw_sp_trap_group_set,
        .trap_policer_init              = mlxsw_sp_trap_policer_init,
        .trap_policer_fini              = mlxsw_sp_trap_policer_fini,
        .trap_policer_set               = mlxsw_sp_trap_policer_set,
        .trap_policer_counter_get       = mlxsw_sp_trap_policer_counter_get,
        .txhdr_construct                = mlxsw_sp_txhdr_construct,
        .resources_register             = mlxsw_sp2_resources_register,
        .params_register                = mlxsw_sp2_params_register,
        .params_unregister              = mlxsw_sp2_params_unregister,
        .ptp_transmitted                = mlxsw_sp_ptp_transmitted,
        .txhdr_len                      = MLXSW_TXHDR_LEN,
        .profile                        = &mlxsw_sp2_config_profile,
        .sdq_supports_cqe_v2            = true,
};
4043
/* Core driver ops for Spectrum-4. No fw_req_rev/fw_filename are set here
 * (NOTE(review): firmware handling for SP4 is presumably done elsewhere —
 * confirm), and it has its own config profile; otherwise it mirrors the
 * Spectrum-2/3 ops.
 */
static struct mlxsw_driver mlxsw_sp4_driver = {
        .kind                           = mlxsw_sp4_driver_name,
        .priv_size                      = sizeof(struct mlxsw_sp),
        .init                           = mlxsw_sp4_init,
        .fini                           = mlxsw_sp_fini,
        .port_split                     = mlxsw_sp_port_split,
        .port_unsplit                   = mlxsw_sp_port_unsplit,
        .ports_remove_selected          = mlxsw_sp_ports_remove_selected,
        .sb_pool_get                    = mlxsw_sp_sb_pool_get,
        .sb_pool_set                    = mlxsw_sp_sb_pool_set,
        .sb_port_pool_get               = mlxsw_sp_sb_port_pool_get,
        .sb_port_pool_set               = mlxsw_sp_sb_port_pool_set,
        .sb_tc_pool_bind_get            = mlxsw_sp_sb_tc_pool_bind_get,
        .sb_tc_pool_bind_set            = mlxsw_sp_sb_tc_pool_bind_set,
        .sb_occ_snapshot                = mlxsw_sp_sb_occ_snapshot,
        .sb_occ_max_clear               = mlxsw_sp_sb_occ_max_clear,
        .sb_occ_port_pool_get           = mlxsw_sp_sb_occ_port_pool_get,
        .sb_occ_tc_port_bind_get        = mlxsw_sp_sb_occ_tc_port_bind_get,
        .trap_init                      = mlxsw_sp_trap_init,
        .trap_fini                      = mlxsw_sp_trap_fini,
        .trap_action_set                = mlxsw_sp_trap_action_set,
        .trap_group_init                = mlxsw_sp_trap_group_init,
        .trap_group_set                 = mlxsw_sp_trap_group_set,
        .trap_policer_init              = mlxsw_sp_trap_policer_init,
        .trap_policer_fini              = mlxsw_sp_trap_policer_fini,
        .trap_policer_set               = mlxsw_sp_trap_policer_set,
        .trap_policer_counter_get       = mlxsw_sp_trap_policer_counter_get,
        .txhdr_construct                = mlxsw_sp_txhdr_construct,
        .resources_register             = mlxsw_sp2_resources_register,
        .params_register                = mlxsw_sp2_params_register,
        .params_unregister              = mlxsw_sp2_params_unregister,
        .ptp_transmitted                = mlxsw_sp_ptp_transmitted,
        .txhdr_len                      = MLXSW_TXHDR_LEN,
        .profile                        = &mlxsw_sp4_config_profile,
        .sdq_supports_cqe_v2            = true,
};
4080
/* Return true if the netdev is a Spectrum port, identified by its ops. */
bool mlxsw_sp_port_dev_check(const struct net_device *dev)
{
        return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
}
4085
4086 static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev,
4087                                    struct netdev_nested_priv *priv)
4088 {
4089         int ret = 0;
4090
4091         if (mlxsw_sp_port_dev_check(lower_dev)) {
4092                 priv->data = (void *)netdev_priv(lower_dev);
4093                 ret = 1;
4094         }
4095
4096         return ret;
4097 }
4098
4099 struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
4100 {
4101         struct netdev_nested_priv priv = {
4102                 .data = NULL,
4103         };
4104
4105         if (mlxsw_sp_port_dev_check(dev))
4106                 return netdev_priv(dev);
4107
4108         netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &priv);
4109
4110         return (struct mlxsw_sp_port *)priv.data;
4111 }
4112
4113 struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
4114 {
4115         struct mlxsw_sp_port *mlxsw_sp_port;
4116
4117         mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
4118         return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL;
4119 }
4120
4121 struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
4122 {
4123         struct netdev_nested_priv priv = {
4124                 .data = NULL,
4125         };
4126
4127         if (mlxsw_sp_port_dev_check(dev))
4128                 return netdev_priv(dev);
4129
4130         netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk,
4131                                       &priv);
4132
4133         return (struct mlxsw_sp_port *)priv.data;
4134 }
4135
4136 struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev)
4137 {
4138         struct mlxsw_sp_port *mlxsw_sp_port;
4139
4140         rcu_read_lock();
4141         mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev);
4142         if (mlxsw_sp_port)
4143                 dev_hold(mlxsw_sp_port->dev);
4144         rcu_read_unlock();
4145         return mlxsw_sp_port;
4146 }
4147
/* Drop the netdev reference taken by mlxsw_sp_port_lower_dev_hold(). */
void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port)
{
        dev_put(mlxsw_sp_port->dev);
}
4152
/* Take a reference on the increased packet parsing depth, programming the
 * hardware (MPRS register) on the 0 -> 1 transition. The mutex serializes
 * the refcount transition with the register write and with concurrent
 * VxLAN UDP dport updates.
 */
int mlxsw_sp_parsing_depth_inc(struct mlxsw_sp *mlxsw_sp)
{
        char mprs_pl[MLXSW_REG_MPRS_LEN];
        int err = 0;

        mutex_lock(&mlxsw_sp->parsing.lock);

        /* Already increased; just bump the refcount. */
        if (refcount_inc_not_zero(&mlxsw_sp->parsing.parsing_depth_ref))
                goto out_unlock;

        mlxsw_reg_mprs_pack(mprs_pl, MLXSW_SP_INCREASED_PARSING_DEPTH,
                            mlxsw_sp->parsing.vxlan_udp_dport);
        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mprs), mprs_pl);
        if (err)
                goto out_unlock;

        /* Cache the new depth only after the hardware accepted it. */
        mlxsw_sp->parsing.parsing_depth = MLXSW_SP_INCREASED_PARSING_DEPTH;
        refcount_set(&mlxsw_sp->parsing.parsing_depth_ref, 1);

out_unlock:
        mutex_unlock(&mlxsw_sp->parsing.lock);
        return err;
}
4176
/* Release a reference on the increased parsing depth; restore the default
 * depth in hardware on the 1 -> 0 transition. The register write's return
 * value is ignored since there is no meaningful recovery on this path.
 */
void mlxsw_sp_parsing_depth_dec(struct mlxsw_sp *mlxsw_sp)
{
        char mprs_pl[MLXSW_REG_MPRS_LEN];

        mutex_lock(&mlxsw_sp->parsing.lock);

        if (!refcount_dec_and_test(&mlxsw_sp->parsing.parsing_depth_ref))
                goto out_unlock;

        mlxsw_reg_mprs_pack(mprs_pl, MLXSW_SP_DEFAULT_PARSING_DEPTH,
                            mlxsw_sp->parsing.vxlan_udp_dport);
        mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mprs), mprs_pl);
        mlxsw_sp->parsing.parsing_depth = MLXSW_SP_DEFAULT_PARSING_DEPTH;

out_unlock:
        mutex_unlock(&mlxsw_sp->parsing.lock);
}
4194
4195 int mlxsw_sp_parsing_vxlan_udp_dport_set(struct mlxsw_sp *mlxsw_sp,
4196                                          __be16 udp_dport)
4197 {
4198         char mprs_pl[MLXSW_REG_MPRS_LEN];
4199         int err;
4200
4201         mutex_lock(&mlxsw_sp->parsing.lock);
4202
4203         mlxsw_reg_mprs_pack(mprs_pl, mlxsw_sp->parsing.parsing_depth,
4204                             be16_to_cpu(udp_dport));
4205         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mprs), mprs_pl);
4206         if (err)
4207                 goto out_unlock;
4208
4209         mlxsw_sp->parsing.vxlan_udp_dport = be16_to_cpu(udp_dport);
4210
4211 out_unlock:
4212         mutex_unlock(&mlxsw_sp->parsing.lock);
4213         return err;
4214 }
4215
/* Make the port leave any bridge the LAG device, or one of the LAG's
 * direct upper devices, is enslaved to. Used when the port leaves the LAG
 * so no stale bridge state remains.
 */
static void
mlxsw_sp_port_lag_uppers_cleanup(struct mlxsw_sp_port *mlxsw_sp_port,
                                 struct net_device *lag_dev)
{
        struct net_device *br_dev = netdev_master_upper_dev_get(lag_dev);
        struct net_device *upper_dev;
        struct list_head *iter;

        /* The LAG itself may be a bridge port. */
        if (netif_is_bridge_port(lag_dev))
                mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lag_dev, br_dev);

        /* And so may any of its upper devices (e.g. VLAN uppers). */
        netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
                if (!netif_is_bridge_port(upper_dev))
                        continue;
                br_dev = netdev_master_upper_dev_get(upper_dev);
                mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, br_dev);
        }
}
4234
4235 static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
4236 {
4237         char sldr_pl[MLXSW_REG_SLDR_LEN];
4238
4239         mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
4240         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
4241 }
4242
4243 static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
4244 {
4245         char sldr_pl[MLXSW_REG_SLDR_LEN];
4246
4247         mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
4248         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
4249 }
4250
4251 static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
4252                                      u16 lag_id, u8 port_index)
4253 {
4254         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4255         char slcor_pl[MLXSW_REG_SLCOR_LEN];
4256
4257         mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
4258                                       lag_id, port_index);
4259         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
4260 }
4261
4262 static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
4263                                         u16 lag_id)
4264 {
4265         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4266         char slcor_pl[MLXSW_REG_SLCOR_LEN];
4267
4268         mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
4269                                          lag_id);
4270         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
4271 }
4272
4273 static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
4274                                         u16 lag_id)
4275 {
4276         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4277         char slcor_pl[MLXSW_REG_SLCOR_LEN];
4278
4279         mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
4280                                         lag_id);
4281         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
4282 }
4283
4284 static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
4285                                          u16 lag_id)
4286 {
4287         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4288         char slcor_pl[MLXSW_REG_SLCOR_LEN];
4289
4290         mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
4291                                          lag_id);
4292         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
4293 }
4294
4295 static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
4296                                   struct net_device *lag_dev,
4297                                   u16 *p_lag_id)
4298 {
4299         struct mlxsw_sp_upper *lag;
4300         int free_lag_id = -1;
4301         u16 max_lag;
4302         int err, i;
4303
4304         err = mlxsw_core_max_lag(mlxsw_sp->core, &max_lag);
4305         if (err)
4306                 return err;
4307
4308         for (i = 0; i < max_lag; i++) {
4309                 lag = mlxsw_sp_lag_get(mlxsw_sp, i);
4310                 if (lag->ref_count) {
4311                         if (lag->dev == lag_dev) {
4312                                 *p_lag_id = i;
4313                                 return 0;
4314                         }
4315                 } else if (free_lag_id < 0) {
4316                         free_lag_id = i;
4317                 }
4318         }
4319         if (free_lag_id < 0)
4320                 return -EBUSY;
4321         *p_lag_id = free_lag_id;
4322         return 0;
4323 }
4324
4325 static bool
4326 mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
4327                           struct net_device *lag_dev,
4328                           struct netdev_lag_upper_info *lag_upper_info,
4329                           struct netlink_ext_ack *extack)
4330 {
4331         u16 lag_id;
4332
4333         if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0) {
4334                 NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported LAG devices");
4335                 return false;
4336         }
4337         if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
4338                 NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type");
4339                 return false;
4340         }
4341         return true;
4342 }
4343
4344 static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
4345                                        u16 lag_id, u8 *p_port_index)
4346 {
4347         u64 max_lag_members;
4348         int i;
4349
4350         max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
4351                                              MAX_LAG_MEMBERS);
4352         for (i = 0; i < max_lag_members; i++) {
4353                 if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
4354                         *p_port_index = i;
4355                         return 0;
4356                 }
4357         }
4358         return -EBUSY;
4359 }
4360
/* Join the port to the hardware LAG backing lag_dev: allocate (or reuse) a
 * LAG index, add the port to the LAG collector, record the core mapping
 * and, if the LAG has a router interface, join it. Error paths unwind in
 * reverse order; a LAG created here is destroyed again when joining fails.
 */
static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
                                  struct net_device *lag_dev,
                                  struct netlink_ext_ack *extack)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        struct mlxsw_sp_upper *lag;
        u16 lag_id;
        u8 port_index;
        int err;

        err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
        if (err)
                return err;
        lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
        /* First member creates the LAG in hardware. */
        if (!lag->ref_count) {
                err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
                if (err)
                        return err;
                lag->dev = lag_dev;
        }

        err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
        if (err)
                return err;
        err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
        if (err)
                goto err_col_port_add;

        mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
                                   mlxsw_sp_port->local_port);
        mlxsw_sp_port->lag_id = lag_id;
        mlxsw_sp_port->lagged = 1;
        lag->ref_count++;

        /* Port is no longer usable as a router interface */
        if (mlxsw_sp_port->default_vlan->fid)
                mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port->default_vlan);

        /* Join a router interface configured on the LAG, if exists */
        err = mlxsw_sp_port_vlan_router_join(mlxsw_sp_port->default_vlan,
                                             lag_dev, extack);
        if (err)
                goto err_router_join;

        return 0;

err_router_join:
        lag->ref_count--;
        mlxsw_sp_port->lagged = 0;
        mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
                                     mlxsw_sp_port->local_port);
        mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
err_col_port_add:
        /* Destroy the LAG only if this call created it (refcount still 0). */
        if (!lag->ref_count)
                mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
        return err;
}
4418
/* Undo mlxsw_sp_port_lag_join(): remove the port from the LAG collector,
 * flush its VLANs, detach LAG uppers from bridges, destroy the LAG in
 * hardware if this was its last member, and restore the default PVID.
 */
static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
                                    struct net_device *lag_dev)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        u16 lag_id = mlxsw_sp_port->lag_id;
        struct mlxsw_sp_upper *lag;

        if (!mlxsw_sp_port->lagged)
                return;
        lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
        WARN_ON(lag->ref_count == 0);

        mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);

        /* Any VLANs configured on the port are no longer valid */
        mlxsw_sp_port_vlan_flush(mlxsw_sp_port, false);
        mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port->default_vlan);
        /* Make the LAG and its directly linked uppers leave bridges they
         * are member in
         */
        mlxsw_sp_port_lag_uppers_cleanup(mlxsw_sp_port, lag_dev);

        /* Last member tears the LAG down in hardware. */
        if (lag->ref_count == 1)
                mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);

        mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
                                     mlxsw_sp_port->local_port);
        mlxsw_sp_port->lagged = 0;
        lag->ref_count--;

        /* Make sure untagged frames are allowed to ingress */
        mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID,
                               ETH_P_8021Q);
}
4453
4454 static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
4455                                       u16 lag_id)
4456 {
4457         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4458         char sldr_pl[MLXSW_REG_SLDR_LEN];
4459
4460         mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
4461                                          mlxsw_sp_port->local_port);
4462         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
4463 }
4464
4465 static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
4466                                          u16 lag_id)
4467 {
4468         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4469         char sldr_pl[MLXSW_REG_SLDR_LEN];
4470
4471         mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
4472                                             mlxsw_sp_port->local_port);
4473         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
4474 }
4475
4476 static int
4477 mlxsw_sp_port_lag_col_dist_enable(struct mlxsw_sp_port *mlxsw_sp_port)
4478 {
4479         int err;
4480
4481         err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port,
4482                                            mlxsw_sp_port->lag_id);
4483         if (err)
4484                 return err;
4485
4486         err = mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id);
4487         if (err)
4488                 goto err_dist_port_add;
4489
4490         return 0;
4491
4492 err_dist_port_add:
4493         mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, mlxsw_sp_port->lag_id);
4494         return err;
4495 }
4496
4497 static int
4498 mlxsw_sp_port_lag_col_dist_disable(struct mlxsw_sp_port *mlxsw_sp_port)
4499 {
4500         int err;
4501
4502         err = mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
4503                                             mlxsw_sp_port->lag_id);
4504         if (err)
4505                 return err;
4506
4507         err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port,
4508                                             mlxsw_sp_port->lag_id);
4509         if (err)
4510                 goto err_col_port_disable;
4511
4512         return 0;
4513
4514 err_col_port_disable:
4515         mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id);
4516         return err;
4517 }
4518
4519 static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
4520                                      struct netdev_lag_lower_state_info *info)
4521 {
4522         if (info->tx_enabled)
4523                 return mlxsw_sp_port_lag_col_dist_enable(mlxsw_sp_port);
4524         else
4525                 return mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
4526 }
4527
4528 static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
4529                                  bool enable)
4530 {
4531         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4532         enum mlxsw_reg_spms_state spms_state;
4533         char *spms_pl;
4534         u16 vid;
4535         int err;
4536
4537         spms_state = enable ? MLXSW_REG_SPMS_STATE_FORWARDING :
4538                               MLXSW_REG_SPMS_STATE_DISCARDING;
4539
4540         spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
4541         if (!spms_pl)
4542                 return -ENOMEM;
4543         mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
4544
4545         for (vid = 0; vid < VLAN_N_VID; vid++)
4546                 mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
4547
4548         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
4549         kfree(spms_pl);
4550         return err;
4551 }
4552
/* Prepare the port for enslavement to an OVS master: enable virtual port
 * mode, force STP forwarding, make the port a member of VLANs 1..4094 and
 * disable learning on all of them. On any failure the steps already taken
 * are rolled back in reverse order and the error is returned.
 */
static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port)
{
        u16 vid = 1;
        int err;

        err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
        if (err)
                return err;
        err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true);
        if (err)
                goto err_port_stp_set;
        /* Join VLANs 1..VLAN_N_VID-2 as untagged=true, pvid=false. */
        err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2,
                                     true, false);
        if (err)
                goto err_port_vlan_set;

        /* Disable learning on every VID; 'vid' is reused by the rollback
         * below to know how far this loop got.
         */
        for (; vid <= VLAN_N_VID - 1; vid++) {
                err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
                                                     vid, false);
                if (err)
                        goto err_vid_learning_set;
        }

        return 0;

err_vid_learning_set:
        /* Re-enable learning only on the VIDs already processed. */
        for (vid--; vid >= 1; vid--)
                mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
err_port_vlan_set:
        mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
err_port_stp_set:
        mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
        return err;
}
4587
4588 static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port)
4589 {
4590         u16 vid;
4591
4592         for (vid = VLAN_N_VID - 1; vid >= 1; vid--)
4593                 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
4594                                                vid, true);
4595
4596         mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2,
4597                                false, false);
4598         mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
4599         mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
4600 }
4601
4602 static bool mlxsw_sp_bridge_has_multiple_vxlans(struct net_device *br_dev)
4603 {
4604         unsigned int num_vxlans = 0;
4605         struct net_device *dev;
4606         struct list_head *iter;
4607
4608         netdev_for_each_lower_dev(br_dev, dev, iter) {
4609                 if (netif_is_vxlan(dev))
4610                         num_vxlans++;
4611         }
4612
4613         return num_vxlans > 1;
4614 }
4615
4616 static bool mlxsw_sp_bridge_vxlan_vlan_is_valid(struct net_device *br_dev)
4617 {
4618         DECLARE_BITMAP(vlans, VLAN_N_VID) = {0};
4619         struct net_device *dev;
4620         struct list_head *iter;
4621
4622         netdev_for_each_lower_dev(br_dev, dev, iter) {
4623                 u16 pvid;
4624                 int err;
4625
4626                 if (!netif_is_vxlan(dev))
4627                         continue;
4628
4629                 err = mlxsw_sp_vxlan_mapped_vid(dev, &pvid);
4630                 if (err || !pvid)
4631                         continue;
4632
4633                 if (test_and_set_bit(pvid, vlans))
4634                         return false;
4635         }
4636
4637         return true;
4638 }
4639
4640 static bool mlxsw_sp_bridge_vxlan_is_valid(struct net_device *br_dev,
4641                                            struct netlink_ext_ack *extack)
4642 {
4643         if (br_multicast_enabled(br_dev)) {
4644                 NL_SET_ERR_MSG_MOD(extack, "Multicast can not be enabled on a bridge with a VxLAN device");
4645                 return false;
4646         }
4647
4648         if (!br_vlan_enabled(br_dev) &&
4649             mlxsw_sp_bridge_has_multiple_vxlans(br_dev)) {
4650                 NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices are not supported in a VLAN-unaware bridge");
4651                 return false;
4652         }
4653
4654         if (br_vlan_enabled(br_dev) &&
4655             !mlxsw_sp_bridge_vxlan_vlan_is_valid(br_dev)) {
4656                 NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices cannot have the same VLAN as PVID and egress untagged");
4657                 return false;
4658         }
4659
4660         return true;
4661 }
4662
/* Handle (PRE)CHANGEUPPER notifications for an mlxsw port netdev.
 *
 * NETDEV_PRECHANGEUPPER vetoes topology changes the driver cannot offload
 * (setting an extack message for the user); NETDEV_CHANGEUPPER reflects an
 * already-committed change into the device. @lower_dev is the device the
 * notification is really about (may be a VLAN/LAG on top of @dev), while
 * @dev is always the mlxsw port itself.
 */
static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
                                               struct net_device *dev,
                                               unsigned long event, void *ptr)
{
        struct netdev_notifier_changeupper_info *info;
        struct mlxsw_sp_port *mlxsw_sp_port;
        struct netlink_ext_ack *extack;
        struct net_device *upper_dev;
        struct mlxsw_sp *mlxsw_sp;
        int err = 0;
        u16 proto;

        mlxsw_sp_port = netdev_priv(dev);
        mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        info = ptr;
        extack = netdev_notifier_info_to_extack(&info->info);

        switch (event) {
        case NETDEV_PRECHANGEUPPER:
                upper_dev = info->upper_dev;
                /* Only VLAN, LAG, bridge, OVS, macvlan and VRF uppers are
                 * supported on top of a port.
                 */
                if (!is_vlan_dev(upper_dev) &&
                    !netif_is_lag_master(upper_dev) &&
                    !netif_is_bridge_master(upper_dev) &&
                    !netif_is_ovs_master(upper_dev) &&
                    !netif_is_macvlan(upper_dev) &&
                    !netif_is_l3_master(upper_dev)) {
                        NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
                        return -EINVAL;
                }
                /* Unlinking is always allowed; the checks below only
                 * apply when a new upper is being attached.
                 */
                if (!info->linking)
                        break;
                if (netif_is_bridge_master(upper_dev) &&
                    !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) &&
                    mlxsw_sp_bridge_has_vxlan(upper_dev) &&
                    !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
                        return -EOPNOTSUPP;
                /* Enslaving under a device that itself has uppers is only
                 * allowed for bridges already offloaded by this driver.
                 */
                if (netdev_has_any_upper_dev(upper_dev) &&
                    (!netif_is_bridge_master(upper_dev) ||
                     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
                                                          upper_dev))) {
                        NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported");
                        return -EINVAL;
                }
                if (netif_is_lag_master(upper_dev) &&
                    !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
                                               info->upper_info, extack))
                        return -EINVAL;
                if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev)) {
                        NL_SET_ERR_MSG_MOD(extack, "Master device is a LAG master and this device has a VLAN");
                        return -EINVAL;
                }
                if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
                    !netif_is_lag_master(vlan_dev_real_dev(upper_dev))) {
                        NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on a LAG port");
                        return -EINVAL;
                }
                if (netif_is_macvlan(upper_dev) &&
                    !mlxsw_sp_rif_exists(mlxsw_sp, lower_dev)) {
                        NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
                        return -EOPNOTSUPP;
                }
                if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev)) {
                        NL_SET_ERR_MSG_MOD(extack, "Master device is an OVS master and this device has a VLAN");
                        return -EINVAL;
                }
                if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev)) {
                        NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on an OVS port");
                        return -EINVAL;
                }
                if (netif_is_bridge_master(upper_dev)) {
                        /* Only 802.1Q and 802.1AD VLAN protocols can be
                         * offloaded for a VLAN-aware bridge.
                         */
                        br_vlan_get_proto(upper_dev, &proto);
                        if (br_vlan_enabled(upper_dev) &&
                            proto != ETH_P_8021Q && proto != ETH_P_8021AD) {
                                NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a bridge with unknown VLAN protocol is not supported");
                                return -EOPNOTSUPP;
                        }
                        if (vlan_uses_dev(lower_dev) &&
                            br_vlan_enabled(upper_dev) &&
                            proto == ETH_P_8021AD) {
                                NL_SET_ERR_MSG_MOD(extack, "Enslaving a port that already has a VLAN upper to an 802.1ad bridge is not supported");
                                return -EOPNOTSUPP;
                        }
                }
                if (netif_is_bridge_port(lower_dev) && is_vlan_dev(upper_dev)) {
                        struct net_device *br_dev = netdev_master_upper_dev_get(lower_dev);

                        if (br_vlan_enabled(br_dev)) {
                                br_vlan_get_proto(br_dev, &proto);
                                if (proto == ETH_P_8021AD) {
                                        NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are not supported on a port enslaved to an 802.1ad bridge");
                                        return -EOPNOTSUPP;
                                }
                        }
                }
                if (is_vlan_dev(upper_dev) &&
                    ntohs(vlan_dev_vlan_proto(upper_dev)) != ETH_P_8021Q) {
                        NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are only supported with 802.1q VLAN protocol");
                        return -EOPNOTSUPP;
                }
                if (is_vlan_dev(upper_dev) && mlxsw_sp_port->security) {
                        NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are not supported on a locked port");
                        return -EOPNOTSUPP;
                }
                break;
        case NETDEV_CHANGEUPPER:
                /* The change already happened in the stack; mirror it into
                 * the hardware. Leave operations return void by design.
                 */
                upper_dev = info->upper_dev;
                if (netif_is_bridge_master(upper_dev)) {
                        if (info->linking)
                                err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
                                                                lower_dev,
                                                                upper_dev,
                                                                extack);
                        else
                                mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
                                                           lower_dev,
                                                           upper_dev);
                } else if (netif_is_lag_master(upper_dev)) {
                        if (info->linking) {
                                err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
                                                             upper_dev, extack);
                        } else {
                                mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
                                mlxsw_sp_port_lag_leave(mlxsw_sp_port,
                                                        upper_dev);
                        }
                } else if (netif_is_ovs_master(upper_dev)) {
                        if (info->linking)
                                err = mlxsw_sp_port_ovs_join(mlxsw_sp_port);
                        else
                                mlxsw_sp_port_ovs_leave(mlxsw_sp_port);
                } else if (netif_is_macvlan(upper_dev)) {
                        if (!info->linking)
                                mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
                } else if (is_vlan_dev(upper_dev)) {
                        struct net_device *br_dev;

                        /* Only a VLAN upper being removed from a bridge
                         * needs handling here.
                         */
                        if (!netif_is_bridge_port(upper_dev))
                                break;
                        if (info->linking)
                                break;
                        br_dev = netdev_master_upper_dev_get(upper_dev);
                        mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev,
                                                   br_dev);
                }
                break;
        }

        return err;
}
4812
4813 static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
4814                                                unsigned long event, void *ptr)
4815 {
4816         struct netdev_notifier_changelowerstate_info *info;
4817         struct mlxsw_sp_port *mlxsw_sp_port;
4818         int err;
4819
4820         mlxsw_sp_port = netdev_priv(dev);
4821         info = ptr;
4822
4823         switch (event) {
4824         case NETDEV_CHANGELOWERSTATE:
4825                 if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
4826                         err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
4827                                                         info->lower_state_info);
4828                         if (err)
4829                                 netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
4830                 }
4831                 break;
4832         }
4833
4834         return 0;
4835 }
4836
4837 static int mlxsw_sp_netdevice_port_event(struct net_device *lower_dev,
4838                                          struct net_device *port_dev,
4839                                          unsigned long event, void *ptr)
4840 {
4841         switch (event) {
4842         case NETDEV_PRECHANGEUPPER:
4843         case NETDEV_CHANGEUPPER:
4844                 return mlxsw_sp_netdevice_port_upper_event(lower_dev, port_dev,
4845                                                            event, ptr);
4846         case NETDEV_CHANGELOWERSTATE:
4847                 return mlxsw_sp_netdevice_port_lower_event(port_dev, event,
4848                                                            ptr);
4849         }
4850
4851         return 0;
4852 }
4853
4854 static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
4855                                         unsigned long event, void *ptr)
4856 {
4857         struct net_device *dev;
4858         struct list_head *iter;
4859         int ret;
4860
4861         netdev_for_each_lower_dev(lag_dev, dev, iter) {
4862                 if (mlxsw_sp_port_dev_check(dev)) {
4863                         ret = mlxsw_sp_netdevice_port_event(lag_dev, dev, event,
4864                                                             ptr);
4865                         if (ret)
4866                                 return ret;
4867                 }
4868         }
4869
4870         return 0;
4871 }
4872
/* Handle (PRE)CHANGEUPPER notifications for a VLAN device (@vlan_dev, VID
 * @vid) stacked on top of an mlxsw port (@dev). PRECHANGEUPPER vetoes
 * unsupported uppers; CHANGEUPPER reflects bridge join/leave and macvlan
 * removal into the device.
 */
static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
                                              struct net_device *dev,
                                              unsigned long event, void *ptr,
                                              u16 vid)
{
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        struct netdev_notifier_changeupper_info *info = ptr;
        struct netlink_ext_ack *extack;
        struct net_device *upper_dev;
        int err = 0;

        extack = netdev_notifier_info_to_extack(&info->info);

        switch (event) {
        case NETDEV_PRECHANGEUPPER:
                upper_dev = info->upper_dev;
                /* Only bridge, macvlan and VRF uppers are supported on a
                 * port VLAN device.
                 */
                if (!netif_is_bridge_master(upper_dev) &&
                    !netif_is_macvlan(upper_dev) &&
                    !netif_is_l3_master(upper_dev)) {
                        NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
                        return -EINVAL;
                }
                /* Unlinking never needs to be vetoed. */
                if (!info->linking)
                        break;
                if (netif_is_bridge_master(upper_dev) &&
                    !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) &&
                    mlxsw_sp_bridge_has_vxlan(upper_dev) &&
                    !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
                        return -EOPNOTSUPP;
                /* Stacking under a device that itself has uppers is only
                 * allowed for bridges already offloaded by this driver.
                 */
                if (netdev_has_any_upper_dev(upper_dev) &&
                    (!netif_is_bridge_master(upper_dev) ||
                     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
                                                          upper_dev))) {
                        NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported");
                        return -EINVAL;
                }
                if (netif_is_macvlan(upper_dev) &&
                    !mlxsw_sp_rif_exists(mlxsw_sp, vlan_dev)) {
                        NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
                        return -EOPNOTSUPP;
                }
                break;
        case NETDEV_CHANGEUPPER:
                upper_dev = info->upper_dev;
                if (netif_is_bridge_master(upper_dev)) {
                        if (info->linking)
                                err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
                                                                vlan_dev,
                                                                upper_dev,
                                                                extack);
                        else
                                mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
                                                           vlan_dev,
                                                           upper_dev);
                } else if (netif_is_macvlan(upper_dev)) {
                        if (!info->linking)
                                mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
                }
                break;
        }

        return err;
}
4937
4938 static int mlxsw_sp_netdevice_lag_port_vlan_event(struct net_device *vlan_dev,
4939                                                   struct net_device *lag_dev,
4940                                                   unsigned long event,
4941                                                   void *ptr, u16 vid)
4942 {
4943         struct net_device *dev;
4944         struct list_head *iter;
4945         int ret;
4946
4947         netdev_for_each_lower_dev(lag_dev, dev, iter) {
4948                 if (mlxsw_sp_port_dev_check(dev)) {
4949                         ret = mlxsw_sp_netdevice_port_vlan_event(vlan_dev, dev,
4950                                                                  event, ptr,
4951                                                                  vid);
4952                         if (ret)
4953                                 return ret;
4954                 }
4955         }
4956
4957         return 0;
4958 }
4959
/* Handle (PRE)CHANGEUPPER notifications for a VLAN device on top of a
 * bridge. Only macvlan and VRF uppers are supported; macvlan removal is
 * reflected into the device on CHANGEUPPER.
 */
static int mlxsw_sp_netdevice_bridge_vlan_event(struct net_device *vlan_dev,
                                                struct net_device *br_dev,
                                                unsigned long event, void *ptr,
                                                u16 vid)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev);
        struct netdev_notifier_changeupper_info *info = ptr;
        struct netlink_ext_ack *extack;
        struct net_device *upper_dev;

        /* Nothing to do if the bridge VLAN device is not on top of an
         * mlxsw port.
         */
        if (!mlxsw_sp)
                return 0;

        extack = netdev_notifier_info_to_extack(&info->info);

        switch (event) {
        case NETDEV_PRECHANGEUPPER:
                upper_dev = info->upper_dev;
                if (!netif_is_macvlan(upper_dev) &&
                    !netif_is_l3_master(upper_dev)) {
                        NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
                        return -EOPNOTSUPP;
                }
                if (!info->linking)
                        break;
                if (netif_is_macvlan(upper_dev) &&
                    !mlxsw_sp_rif_exists(mlxsw_sp, vlan_dev)) {
                        NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
                        return -EOPNOTSUPP;
                }
                break;
        case NETDEV_CHANGEUPPER:
                upper_dev = info->upper_dev;
                if (info->linking)
                        break;
                if (netif_is_macvlan(upper_dev))
                        mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
                break;
        }

        return 0;
}
5002
5003 static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
5004                                          unsigned long event, void *ptr)
5005 {
5006         struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
5007         u16 vid = vlan_dev_vlan_id(vlan_dev);
5008
5009         if (mlxsw_sp_port_dev_check(real_dev))
5010                 return mlxsw_sp_netdevice_port_vlan_event(vlan_dev, real_dev,
5011                                                           event, ptr, vid);
5012         else if (netif_is_lag_master(real_dev))
5013                 return mlxsw_sp_netdevice_lag_port_vlan_event(vlan_dev,
5014                                                               real_dev, event,
5015                                                               ptr, vid);
5016         else if (netif_is_bridge_master(real_dev))
5017                 return mlxsw_sp_netdevice_bridge_vlan_event(vlan_dev, real_dev,
5018                                                             event, ptr, vid);
5019
5020         return 0;
5021 }
5022
/* Handle (PRE)CHANGEUPPER notifications for a bridge device.
 * PRECHANGEUPPER vetoes unsupported uppers (only VLAN, macvlan and VRF
 * are allowed, and none on top of an 802.1ad bridge); CHANGEUPPER cleans
 * up the RIF state when an upper is removed.
 */
static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
                                           unsigned long event, void *ptr)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(br_dev);
        struct netdev_notifier_changeupper_info *info = ptr;
        struct netlink_ext_ack *extack;
        struct net_device *upper_dev;
        u16 proto;

        /* Nothing to do for bridges with no mlxsw port underneath. */
        if (!mlxsw_sp)
                return 0;

        extack = netdev_notifier_info_to_extack(&info->info);

        switch (event) {
        case NETDEV_PRECHANGEUPPER:
                upper_dev = info->upper_dev;
                if (!is_vlan_dev(upper_dev) &&
                    !netif_is_macvlan(upper_dev) &&
                    !netif_is_l3_master(upper_dev)) {
                        NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
                        return -EOPNOTSUPP;
                }
                /* Unlinking never needs to be vetoed. */
                if (!info->linking)
                        break;
                if (br_vlan_enabled(br_dev)) {
                        br_vlan_get_proto(br_dev, &proto);
                        if (proto == ETH_P_8021AD) {
                                NL_SET_ERR_MSG_MOD(extack, "Upper devices are not supported on top of an 802.1ad bridge");
                                return -EOPNOTSUPP;
                        }
                }
                if (is_vlan_dev(upper_dev) &&
                    ntohs(vlan_dev_vlan_proto(upper_dev)) != ETH_P_8021Q) {
                        NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are only supported with 802.1q VLAN protocol");
                        return -EOPNOTSUPP;
                }
                if (netif_is_macvlan(upper_dev) &&
                    !mlxsw_sp_rif_exists(mlxsw_sp, br_dev)) {
                        NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
                        return -EOPNOTSUPP;
                }
                break;
        case NETDEV_CHANGEUPPER:
                upper_dev = info->upper_dev;
                /* Only removals need follow-up work. */
                if (info->linking)
                        break;
                if (is_vlan_dev(upper_dev))
                        mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, upper_dev);
                if (netif_is_macvlan(upper_dev))
                        mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
                break;
        }

        return 0;
}
5079
5080 static int mlxsw_sp_netdevice_macvlan_event(struct net_device *macvlan_dev,
5081                                             unsigned long event, void *ptr)
5082 {
5083         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(macvlan_dev);
5084         struct netdev_notifier_changeupper_info *info = ptr;
5085         struct netlink_ext_ack *extack;
5086         struct net_device *upper_dev;
5087
5088         if (!mlxsw_sp || event != NETDEV_PRECHANGEUPPER)
5089                 return 0;
5090
5091         extack = netdev_notifier_info_to_extack(&info->info);
5092         upper_dev = info->upper_dev;
5093
5094         if (!netif_is_l3_master(upper_dev)) {
5095                 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
5096                 return -EOPNOTSUPP;
5097         }
5098
5099         return 0;
5100 }
5101
/* Handle netdev events for a VxLAN device whose bridge master sits on top
 * of an mlxsw port: join the device to the bridge offload on CHANGEUPPER
 * (linking) and NETDEV_PRE_UP, leave it on CHANGEUPPER (unlinking) and
 * NETDEV_DOWN.
 */
static int mlxsw_sp_netdevice_vxlan_event(struct mlxsw_sp *mlxsw_sp,
                                          struct net_device *dev,
                                          unsigned long event, void *ptr)
{
        struct netdev_notifier_changeupper_info *cu_info;
        struct netdev_notifier_info *info = ptr;
        struct netlink_ext_ack *extack;
        struct net_device *upper_dev;

        extack = netdev_notifier_info_to_extack(info);

        switch (event) {
        case NETDEV_CHANGEUPPER:
                cu_info = container_of(info,
                                       struct netdev_notifier_changeupper_info,
                                       info);
                upper_dev = cu_info->upper_dev;
                if (!netif_is_bridge_master(upper_dev))
                        return 0;
                /* Ignore bridges with no mlxsw port underneath. */
                if (!mlxsw_sp_lower_get(upper_dev))
                        return 0;
                if (!mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
                        return -EOPNOTSUPP;
                if (cu_info->linking) {
                        /* Joining is deferred until the device is brought
                         * up (see NETDEV_PRE_UP below).
                         */
                        if (!netif_running(dev))
                                return 0;
                        /* When the bridge is VLAN-aware, the VNI of the VxLAN
                         * device needs to be mapped to a VLAN, but at this
                         * point no VLANs are configured on the VxLAN device
                         */
                        if (br_vlan_enabled(upper_dev))
                                return 0;
                        return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev,
                                                          dev, 0, extack);
                } else {
                        /* VLANs were already flushed, which triggered the
                         * necessary cleanup
                         */
                        if (br_vlan_enabled(upper_dev))
                                return 0;
                        mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
                }
                break;
        case NETDEV_PRE_UP:
                upper_dev = netdev_master_upper_dev_get(dev);
                if (!upper_dev)
                        return 0;
                if (!netif_is_bridge_master(upper_dev))
                        return 0;
                if (!mlxsw_sp_lower_get(upper_dev))
                        return 0;
                /* Returning an error here vetoes bringing the device up. */
                return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev, dev, 0,
                                                  extack);
        case NETDEV_DOWN:
                upper_dev = netdev_master_upper_dev_get(dev);
                if (!upper_dev)
                        return 0;
                if (!netif_is_bridge_master(upper_dev))
                        return 0;
                if (!mlxsw_sp_lower_get(upper_dev))
                        return 0;
                mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
                break;
        }

        return 0;
}
5169
/* Top-level netdevice notifier for the driver: invalidates SPAN entries
 * that mirror to a device being unregistered, respins SPAN state, then
 * dispatches the event to the handler matching the device type. Note the
 * VxLAN check comes first, before the port/LAG/VLAN/bridge checks.
 */
static int mlxsw_sp_netdevice_event(struct notifier_block *nb,
                                    unsigned long event, void *ptr)
{
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        struct mlxsw_sp_span_entry *span_entry;
        struct mlxsw_sp *mlxsw_sp;
        int err = 0;

        mlxsw_sp = container_of(nb, struct mlxsw_sp, netdevice_nb);
        if (event == NETDEV_UNREGISTER) {
                /* Stop mirroring to a netdev that is going away. */
                span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, dev);
                if (span_entry)
                        mlxsw_sp_span_entry_invalidate(mlxsw_sp, span_entry);
        }
        /* Re-evaluate SPAN state on every event, not only unregister. */
        mlxsw_sp_span_respin(mlxsw_sp);

        if (netif_is_vxlan(dev))
                err = mlxsw_sp_netdevice_vxlan_event(mlxsw_sp, dev, event, ptr);
        else if (mlxsw_sp_port_dev_check(dev))
                err = mlxsw_sp_netdevice_port_event(dev, dev, event, ptr);
        else if (netif_is_lag_master(dev))
                err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
        else if (is_vlan_dev(dev))
                err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);
        else if (netif_is_bridge_master(dev))
                err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr);
        else if (netif_is_macvlan(dev))
                err = mlxsw_sp_netdevice_macvlan_event(dev, event, ptr);

        return notifier_from_errno(err);
}
5201
5202 static struct notifier_block mlxsw_sp_inetaddr_valid_nb __read_mostly = {
5203         .notifier_call = mlxsw_sp_inetaddr_valid_event,
5204 };
5205
5206 static struct notifier_block mlxsw_sp_inet6addr_valid_nb __read_mostly = {
5207         .notifier_call = mlxsw_sp_inet6addr_valid_event,
5208 };
5209
5210 static const struct pci_device_id mlxsw_sp1_pci_id_table[] = {
5211         {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0},
5212         {0, },
5213 };
5214
5215 static struct pci_driver mlxsw_sp1_pci_driver = {
5216         .name = mlxsw_sp1_driver_name,
5217         .id_table = mlxsw_sp1_pci_id_table,
5218 };
5219
/* PCI IDs handled by the Spectrum-2 ASIC driver; table is zero-terminated. */
static const struct pci_device_id mlxsw_sp2_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM2), 0},
	{0, },
};

/* Registered through mlxsw_pci_driver_register() in mlxsw_sp_module_init(). */
static struct pci_driver mlxsw_sp2_pci_driver = {
	.name = mlxsw_sp2_driver_name,
	.id_table = mlxsw_sp2_pci_id_table,
};
5229
/* PCI IDs handled by the Spectrum-3 ASIC driver; table is zero-terminated. */
static const struct pci_device_id mlxsw_sp3_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM3), 0},
	{0, },
};

/* Registered through mlxsw_pci_driver_register() in mlxsw_sp_module_init(). */
static struct pci_driver mlxsw_sp3_pci_driver = {
	.name = mlxsw_sp3_driver_name,
	.id_table = mlxsw_sp3_pci_id_table,
};
5239
/* PCI IDs handled by the Spectrum-4 ASIC driver; table is zero-terminated. */
static const struct pci_device_id mlxsw_sp4_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM4), 0},
	{0, },
};

/* Registered through mlxsw_pci_driver_register() in mlxsw_sp_module_init(). */
static struct pci_driver mlxsw_sp4_pci_driver = {
	.name = mlxsw_sp4_driver_name,
	.id_table = mlxsw_sp4_pci_id_table,
};
5249
/* Module init: hook the IPv4/IPv6 address validator chains, then register the
 * four per-generation core drivers (Spectrum-1..4) followed by their PCI
 * drivers.  On any failure, everything registered so far is unwound in exact
 * reverse order via the chained error labels; mlxsw_sp_module_exit() mirrors
 * the full teardown.
 *
 * Returns 0 on success or the first registration error.
 */
static int __init mlxsw_sp_module_init(void)
{
	int err;

	/* Return values of the validator registrations are not checked. */
	register_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
	register_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);

	err = mlxsw_core_driver_register(&mlxsw_sp1_driver);
	if (err)
		goto err_sp1_core_driver_register;

	err = mlxsw_core_driver_register(&mlxsw_sp2_driver);
	if (err)
		goto err_sp2_core_driver_register;

	err = mlxsw_core_driver_register(&mlxsw_sp3_driver);
	if (err)
		goto err_sp3_core_driver_register;

	err = mlxsw_core_driver_register(&mlxsw_sp4_driver);
	if (err)
		goto err_sp4_core_driver_register;

	/* All core drivers are in place; now register the PCI drivers that
	 * bind devices to them.
	 */
	err = mlxsw_pci_driver_register(&mlxsw_sp1_pci_driver);
	if (err)
		goto err_sp1_pci_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp2_pci_driver);
	if (err)
		goto err_sp2_pci_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp3_pci_driver);
	if (err)
		goto err_sp3_pci_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp4_pci_driver);
	if (err)
		goto err_sp4_pci_driver_register;

	return 0;

	/* Unwind in reverse registration order; each label falls through to
	 * the next, undoing one step at a time.
	 */
err_sp4_pci_driver_register:
	mlxsw_pci_driver_unregister(&mlxsw_sp3_pci_driver);
err_sp3_pci_driver_register:
	mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
err_sp2_pci_driver_register:
	mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
err_sp1_pci_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp4_driver);
err_sp4_core_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp3_driver);
err_sp3_core_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
err_sp2_core_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
err_sp1_core_driver_register:
	unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
	unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
	return err;
}
5310
/* Module exit: tear everything down in exact reverse order of
 * mlxsw_sp_module_init() — PCI drivers first (Spectrum-4..1), then the core
 * drivers, then the address validator notifiers.
 */
static void __exit mlxsw_sp_module_exit(void)
{
	mlxsw_pci_driver_unregister(&mlxsw_sp4_pci_driver);
	mlxsw_pci_driver_unregister(&mlxsw_sp3_pci_driver);
	mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
	mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp4_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp3_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
	unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
	unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
}
5324
module_init(mlxsw_sp_module_init);
module_exit(mlxsw_sp_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
/* Expose all four generations' PCI ID tables so udev/modprobe can autoload
 * this module when a matching device appears.
 */
MODULE_DEVICE_TABLE(pci, mlxsw_sp1_pci_id_table);
MODULE_DEVICE_TABLE(pci, mlxsw_sp2_pci_id_table);
MODULE_DEVICE_TABLE(pci, mlxsw_sp3_pci_id_table);
MODULE_DEVICE_TABLE(pci, mlxsw_sp4_pci_id_table);
/* Declare the firmware files this module may request, so they can be bundled
 * into initramfs images.  NOTE(review): no Spectrum-4 firmware entry here —
 * presumably intentional for this kernel version; confirm upstream.
 */
MODULE_FIRMWARE(MLXSW_SP1_FW_FILENAME);
MODULE_FIRMWARE(MLXSW_SP2_FW_FILENAME);
MODULE_FIRMWARE(MLXSW_SP3_FW_FILENAME);
MODULE_FIRMWARE(MLXSW_SP_LINECARDS_INI_BUNDLE_FILENAME);