Merge tag 'drm-misc-next-fixes-2023-01-03' of git://anongit.freedesktop.org/drm/drm...
[linux-2.6-microblaze.git] / drivers / net / ethernet / mellanox / mlxsw / spectrum.c
1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
3
4 #include <linux/kernel.h>
5 #include <linux/module.h>
6 #include <linux/types.h>
7 #include <linux/pci.h>
8 #include <linux/netdevice.h>
9 #include <linux/etherdevice.h>
10 #include <linux/ethtool.h>
11 #include <linux/slab.h>
12 #include <linux/device.h>
13 #include <linux/skbuff.h>
14 #include <linux/if_vlan.h>
15 #include <linux/if_bridge.h>
16 #include <linux/workqueue.h>
17 #include <linux/jiffies.h>
18 #include <linux/bitops.h>
19 #include <linux/list.h>
20 #include <linux/notifier.h>
21 #include <linux/dcbnl.h>
22 #include <linux/inetdevice.h>
23 #include <linux/netlink.h>
24 #include <linux/jhash.h>
25 #include <linux/log2.h>
26 #include <linux/refcount.h>
27 #include <linux/rhashtable.h>
28 #include <net/switchdev.h>
29 #include <net/pkt_cls.h>
30 #include <net/netevent.h>
31 #include <net/addrconf.h>
32 #include <linux/ptp_classify.h>
33
34 #include "spectrum.h"
35 #include "pci.h"
36 #include "core.h"
37 #include "core_env.h"
38 #include "reg.h"
39 #include "port.h"
40 #include "trap.h"
41 #include "txheader.h"
42 #include "spectrum_cnt.h"
43 #include "spectrum_dpipe.h"
44 #include "spectrum_acl_flex_actions.h"
45 #include "spectrum_span.h"
46 #include "spectrum_ptp.h"
47 #include "spectrum_trap.h"
48
/* Firmware revision components the driver was built against. The minor and
 * subminor numbers are shared by all Spectrum generations; the major number
 * is per-ASIC. NOTE(review): used to build the firmware file names below —
 * the validation/flash flow itself is not visible in this chunk.
 */
#define MLXSW_SP_FWREV_MINOR 2010
#define MLXSW_SP_FWREV_SUBMINOR 1006

#define MLXSW_SP1_FWREV_MAJOR 13
#define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702

static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
        .major = MLXSW_SP1_FWREV_MAJOR,
        .minor = MLXSW_SP_FWREV_MINOR,
        .subminor = MLXSW_SP_FWREV_SUBMINOR,
        /* NOTE(review): presumably the lowest minor from which the device can
         * be reset without a reboot — confirm against the core reset flow.
         */
        .can_reset_minor = MLXSW_SP1_FWREV_CAN_RESET_MINOR,
};

/* Firmware image name, e.g. "mellanox/mlxsw_spectrum-13.2010.1006.mfa2". */
#define MLXSW_SP1_FW_FILENAME \
        "mellanox/mlxsw_spectrum-" __stringify(MLXSW_SP1_FWREV_MAJOR) \
        "." __stringify(MLXSW_SP_FWREV_MINOR) \
        "." __stringify(MLXSW_SP_FWREV_SUBMINOR) ".mfa2"

#define MLXSW_SP2_FWREV_MAJOR 29

static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = {
        .major = MLXSW_SP2_FWREV_MAJOR,
        .minor = MLXSW_SP_FWREV_MINOR,
        .subminor = MLXSW_SP_FWREV_SUBMINOR,
};

#define MLXSW_SP2_FW_FILENAME \
        "mellanox/mlxsw_spectrum2-" __stringify(MLXSW_SP2_FWREV_MAJOR) \
        "." __stringify(MLXSW_SP_FWREV_MINOR) \
        "." __stringify(MLXSW_SP_FWREV_SUBMINOR) ".mfa2"

#define MLXSW_SP3_FWREV_MAJOR 30

static const struct mlxsw_fw_rev mlxsw_sp3_fw_rev = {
        .major = MLXSW_SP3_FWREV_MAJOR,
        .minor = MLXSW_SP_FWREV_MINOR,
        .subminor = MLXSW_SP_FWREV_SUBMINOR,
};

#define MLXSW_SP3_FW_FILENAME \
        "mellanox/mlxsw_spectrum3-" __stringify(MLXSW_SP3_FWREV_MAJOR) \
        "." __stringify(MLXSW_SP_FWREV_MINOR) \
        "." __stringify(MLXSW_SP_FWREV_SUBMINOR) ".mfa2"

/* Line card INI bundle name, versioned by the shared minor/subminor only. */
#define MLXSW_SP_LINECARDS_INI_BUNDLE_FILENAME \
        "mellanox/lc_ini_bundle_" \
        __stringify(MLXSW_SP_FWREV_MINOR) "_" \
        __stringify(MLXSW_SP_FWREV_SUBMINOR) ".bin"

static const char mlxsw_sp1_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp2_driver_name[] = "mlxsw_spectrum2";
static const char mlxsw_sp3_driver_name[] = "mlxsw_spectrum3";
static const char mlxsw_sp4_driver_name[] = "mlxsw_spectrum4";
102
/* Masks of the base MAC address bits that are fixed per system.
 * NOTE(review): the unmasked low bits (10 on SP1, 12 on SP2/3) presumably
 * carry the per-port offset — confirm against the PCI probe code that
 * consumes these masks.
 */
static const unsigned char mlxsw_sp1_mac_mask[ETH_ALEN] = {
        0xff, 0xff, 0xff, 0xff, 0xfc, 0x00
};
static const unsigned char mlxsw_sp2_mac_mask[ETH_ALEN] = {
        0xff, 0xff, 0xff, 0xff, 0xf0, 0x00
};
109
/* Tx header field definitions. Each MLXSW_ITEM32() invocation generates the
 * mlxsw_tx_hdr_<field>_set() accessors used by the Tx header construction
 * code below (offset, LSB, width within the header buffer).
 */

/* tx_hdr_version
 * Tx header version.
 * Must be set to 1.
 */
MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);

/* tx_hdr_ctl
 * Packet control type.
 * 0 - Ethernet control (e.g. EMADs, LACP)
 * 1 - Ethernet data
 */
MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);

/* tx_hdr_proto
 * Packet protocol type. Must be set to 1 (Ethernet).
 */
MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);

/* tx_hdr_rx_is_router
 * Packet is sent from the router. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);

/* tx_hdr_fid_valid
 * Indicates if the 'fid' field is valid and should be used for
 * forwarding lookup. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);

/* tx_hdr_swid
 * Switch partition ID. Must be set to 0.
 */
MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);

/* tx_hdr_control_tclass
 * Indicates if the packet should use the control TClass and not one
 * of the data TClasses.
 */
MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);

/* tx_hdr_etclass
 * Egress TClass to be used on the egress device on the egress port.
 */
MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);

/* tx_hdr_port_mid
 * Destination local port for unicast packets.
 * Destination multicast ID for multicast packets.
 *
 * Control packets are directed to a specific egress port, while data
 * packets are transmitted through the CPU port (0) into the switch partition,
 * where forwarding rules are applied.
 */
MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);

/* tx_hdr_fid
 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
 * Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid, 0x08, 16, 16);

/* tx_hdr_type
 * 0 - Data packets
 * 6 - Control packets
 */
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
177
178 int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
179                               unsigned int counter_index, u64 *packets,
180                               u64 *bytes)
181 {
182         char mgpc_pl[MLXSW_REG_MGPC_LEN];
183         int err;
184
185         mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_NOP,
186                             MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
187         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
188         if (err)
189                 return err;
190         if (packets)
191                 *packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl);
192         if (bytes)
193                 *bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl);
194         return 0;
195 }
196
197 static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp,
198                                        unsigned int counter_index)
199 {
200         char mgpc_pl[MLXSW_REG_MGPC_LEN];
201
202         mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR,
203                             MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
204         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
205 }
206
207 int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
208                                 unsigned int *p_counter_index)
209 {
210         int err;
211
212         err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
213                                      p_counter_index);
214         if (err)
215                 return err;
216         err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index);
217         if (err)
218                 goto err_counter_clear;
219         return 0;
220
221 err_counter_clear:
222         mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
223                               *p_counter_index);
224         return err;
225 }
226
227 void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
228                                 unsigned int counter_index)
229 {
230          mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
231                                counter_index);
232 }
233
234 void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
235                               const struct mlxsw_tx_info *tx_info)
236 {
237         char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
238
239         memset(txhdr, 0, MLXSW_TXHDR_LEN);
240
241         mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
242         mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
243         mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
244         mlxsw_tx_hdr_swid_set(txhdr, 0);
245         mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
246         mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
247         mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
248 }
249
/* Push a *data*-type Tx header onto a PTP skb that requires a hardware time
 * stamp. Returns 0 on success; on failure the skb has been freed and the
 * port's tx_dropped counter bumped, so the caller must not touch it again.
 */
int
mlxsw_sp_txhdr_ptp_data_construct(struct mlxsw_core *mlxsw_core,
                                  struct mlxsw_sp_port *mlxsw_sp_port,
                                  struct sk_buff *skb,
                                  const struct mlxsw_tx_info *tx_info)
{
        char *txhdr;
        u16 max_fid;
        int err;

        /* Make sure there is headroom for the Tx header before pushing it. */
        if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
                err = -ENOMEM;
                goto err_skb_cow_head;
        }

        if (!MLXSW_CORE_RES_VALID(mlxsw_core, FID)) {
                err = -EIO;
                goto err_res_valid;
        }
        max_fid = MLXSW_CORE_RES_GET(mlxsw_core, FID);

        txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
        memset(txhdr, 0, MLXSW_TXHDR_LEN);

        mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
        mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
        mlxsw_tx_hdr_rx_is_router_set(txhdr, true);
        mlxsw_tx_hdr_fid_valid_set(txhdr, true);
        /* NOTE(review): max_fid + local_port - 1 presumably selects a
         * per-port FID reserved above the regular FID range — confirm
         * against the FID initialization code.
         */
        mlxsw_tx_hdr_fid_set(txhdr, max_fid + tx_info->local_port - 1);
        mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_DATA);
        return 0;

err_res_valid:
err_skb_cow_head:
        this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
        dev_kfree_skb_any(skb);
        return err;
}
288
289 static bool mlxsw_sp_skb_requires_ts(struct sk_buff *skb)
290 {
291         unsigned int type;
292
293         if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
294                 return false;
295
296         type = ptp_classify_raw(skb);
297         return !!ptp_parse_header(skb, type);
298 }
299
/* Prepend the appropriate Tx header to an outgoing skb: the per-ASIC PTP
 * path for time-stamped PTP events, the regular control header otherwise.
 * Returns 0 on success; on any failure the skb has already been freed and
 * accounted as dropped, so the caller must not free it again.
 */
static int mlxsw_sp_txhdr_handle(struct mlxsw_core *mlxsw_core,
                                 struct mlxsw_sp_port *mlxsw_sp_port,
                                 struct sk_buff *skb,
                                 const struct mlxsw_tx_info *tx_info)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

        /* In Spectrum-2 and Spectrum-3, PTP events that require a time stamp
         * need special handling and cannot be transmitted as regular control
         * packets.
         */
        if (unlikely(mlxsw_sp_skb_requires_ts(skb)))
                return mlxsw_sp->ptp_ops->txhdr_construct(mlxsw_core,
                                                          mlxsw_sp_port, skb,
                                                          tx_info);

        if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
                this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
                dev_kfree_skb_any(skb);
                return -ENOMEM;
        }

        mlxsw_sp_txhdr_construct(skb, tx_info);
        return 0;
}
325
/* Translate a bridge STP port state (BR_STATE_*) into the corresponding
 * SPMS register encoding. Listening, disabled and blocking all map to
 * discarding; any other value is a programming error and BUG()s.
 */
enum mlxsw_reg_spms_state mlxsw_sp_stp_spms_state(u8 state)
{
        switch (state) {
        case BR_STATE_FORWARDING:
                return MLXSW_REG_SPMS_STATE_FORWARDING;
        case BR_STATE_LEARNING:
                return MLXSW_REG_SPMS_STATE_LEARNING;
        case BR_STATE_LISTENING:
        case BR_STATE_DISABLED:
        case BR_STATE_BLOCKING:
                return MLXSW_REG_SPMS_STATE_DISCARDING;
        default:
                BUG();
        }
}
341
342 int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
343                               u8 state)
344 {
345         enum mlxsw_reg_spms_state spms_state = mlxsw_sp_stp_spms_state(state);
346         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
347         char *spms_pl;
348         int err;
349
350         spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
351         if (!spms_pl)
352                 return -ENOMEM;
353         mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
354         mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
355
356         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
357         kfree(spms_pl);
358         return err;
359 }
360
361 static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
362 {
363         char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
364         int err;
365
366         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
367         if (err)
368                 return err;
369         mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
370         return 0;
371 }
372
373 int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
374                                    bool is_up)
375 {
376         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
377         char paos_pl[MLXSW_REG_PAOS_LEN];
378
379         mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
380                             is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
381                             MLXSW_PORT_ADMIN_STATUS_DOWN);
382         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
383 }
384
385 static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
386                                       const unsigned char *addr)
387 {
388         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
389         char ppad_pl[MLXSW_REG_PPAD_LEN];
390
391         mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
392         mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
393         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
394 }
395
396 static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
397 {
398         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
399
400         eth_hw_addr_gen(mlxsw_sp_port->dev, mlxsw_sp->base_mac,
401                         mlxsw_sp_port->local_port);
402         return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port,
403                                           mlxsw_sp_port->dev->dev_addr);
404 }
405
406 static int mlxsw_sp_port_max_mtu_get(struct mlxsw_sp_port *mlxsw_sp_port, int *p_max_mtu)
407 {
408         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
409         char pmtu_pl[MLXSW_REG_PMTU_LEN];
410         int err;
411
412         mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
413         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
414         if (err)
415                 return err;
416
417         *p_max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);
418         return 0;
419 }
420
421 static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
422 {
423         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
424         char pmtu_pl[MLXSW_REG_PMTU_LEN];
425
426         mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
427         if (mtu > mlxsw_sp_port->max_mtu)
428                 return -EINVAL;
429
430         mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
431         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
432 }
433
434 static int mlxsw_sp_port_swid_set(struct mlxsw_sp *mlxsw_sp,
435                                   u16 local_port, u8 swid)
436 {
437         char pspa_pl[MLXSW_REG_PSPA_LEN];
438
439         mlxsw_reg_pspa_pack(pspa_pl, swid, local_port);
440         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
441 }
442
443 int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
444 {
445         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
446         char svpe_pl[MLXSW_REG_SVPE_LEN];
447
448         mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
449         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
450 }
451
452 int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
453                                    bool learn_enable)
454 {
455         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
456         char *spvmlr_pl;
457         int err;
458
459         spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
460         if (!spvmlr_pl)
461                 return -ENOMEM;
462         mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
463                               learn_enable);
464         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
465         kfree(spvmlr_pl);
466         return err;
467 }
468
469 int mlxsw_sp_port_security_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
470 {
471         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
472         char spfsr_pl[MLXSW_REG_SPFSR_LEN];
473         int err;
474
475         if (mlxsw_sp_port->security == enable)
476                 return 0;
477
478         mlxsw_reg_spfsr_pack(spfsr_pl, mlxsw_sp_port->local_port, enable);
479         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spfsr), spfsr_pl);
480         if (err)
481                 return err;
482
483         mlxsw_sp_port->security = enable;
484         return 0;
485 }
486
487 int mlxsw_sp_ethtype_to_sver_type(u16 ethtype, u8 *p_sver_type)
488 {
489         switch (ethtype) {
490         case ETH_P_8021Q:
491                 *p_sver_type = 0;
492                 break;
493         case ETH_P_8021AD:
494                 *p_sver_type = 1;
495                 break;
496         default:
497                 return -EINVAL;
498         }
499
500         return 0;
501 }
502
503 int mlxsw_sp_port_egress_ethtype_set(struct mlxsw_sp_port *mlxsw_sp_port,
504                                      u16 ethtype)
505 {
506         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
507         char spevet_pl[MLXSW_REG_SPEVET_LEN];
508         u8 sver_type;
509         int err;
510
511         err = mlxsw_sp_ethtype_to_sver_type(ethtype, &sver_type);
512         if (err)
513                 return err;
514
515         mlxsw_reg_spevet_pack(spevet_pl, mlxsw_sp_port->local_port, sver_type);
516         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spevet), spevet_pl);
517 }
518
519 static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
520                                     u16 vid, u16 ethtype)
521 {
522         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
523         char spvid_pl[MLXSW_REG_SPVID_LEN];
524         u8 sver_type;
525         int err;
526
527         err = mlxsw_sp_ethtype_to_sver_type(ethtype, &sver_type);
528         if (err)
529                 return err;
530
531         mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid,
532                              sver_type);
533
534         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
535 }
536
537 static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port,
538                                             bool allow)
539 {
540         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
541         char spaft_pl[MLXSW_REG_SPAFT_LEN];
542
543         mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow);
544         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl);
545 }
546
/* Set the port's PVID and cache it in mlxsw_sp_port->pvid. A VID of 0 means
 * "no PVID": untagged frames are rejected instead of reprogramming SPVID.
 * For a non-zero VID, SPVID is written first and untagged acceptance is
 * enabled second; if the latter fails, the previous PVID is restored from
 * the cached value before returning the error.
 */
int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
                           u16 ethtype)
{
        int err;

        if (!vid) {
                err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false);
                if (err)
                        return err;
        } else {
                err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid, ethtype);
                if (err)
                        return err;
                err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, true);
                if (err)
                        goto err_port_allow_untagged_set;
        }

        mlxsw_sp_port->pvid = vid;
        return 0;

err_port_allow_untagged_set:
        /* Roll back to the still-cached previous PVID. */
        __mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid, ethtype);
        return err;
}
572
573 static int
574 mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
575 {
576         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
577         char sspr_pl[MLXSW_REG_SSPR_LEN];
578
579         mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
580         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
581 }
582
/* Validate a PMLP register response and fill *port_mapping from it. The
 * driver only supports ports whose lanes have a power-of-2 width, all on
 * the same module and slot index, with identical and sequential TX/RX lane
 * numbers. A width of 0 (unmapped port) passes validation, as the checks
 * loop over zero lanes.
 */
static int
mlxsw_sp_port_module_info_parse(struct mlxsw_sp *mlxsw_sp,
                                u16 local_port, char *pmlp_pl,
                                struct mlxsw_sp_port_mapping *port_mapping)
{
        bool separate_rxtx;
        u8 first_lane;
        u8 slot_index;
        u8 module;
        u8 width;
        int i;

        /* Lane 0 provides the reference values the other lanes are
         * checked against.
         */
        module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
        slot_index = mlxsw_reg_pmlp_slot_index_get(pmlp_pl, 0);
        width = mlxsw_reg_pmlp_width_get(pmlp_pl);
        separate_rxtx = mlxsw_reg_pmlp_rxtx_get(pmlp_pl);
        first_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);

        if (width && !is_power_of_2(width)) {
                dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: width value is not power of 2\n",
                        local_port);
                return -EINVAL;
        }

        for (i = 0; i < width; i++) {
                if (mlxsw_reg_pmlp_module_get(pmlp_pl, i) != module) {
                        dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: contains multiple modules\n",
                                local_port);
                        return -EINVAL;
                }
                if (mlxsw_reg_pmlp_slot_index_get(pmlp_pl, i) != slot_index) {
                        dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: contains multiple slot indexes\n",
                                local_port);
                        return -EINVAL;
                }
                /* When RX and TX lanes are reported separately, they must
                 * still agree per lane.
                 */
                if (separate_rxtx &&
                    mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) !=
                    mlxsw_reg_pmlp_rx_lane_get(pmlp_pl, i)) {
                        dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are different\n",
                                local_port);
                        return -EINVAL;
                }
                if (mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) != i + first_lane) {
                        dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are not sequential\n",
                                local_port);
                        return -EINVAL;
                }
        }

        port_mapping->module = module;
        port_mapping->slot_index = slot_index;
        port_mapping->width = width;
        port_mapping->module_width = width;
        port_mapping->lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
        return 0;
}
639
640 static int
641 mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u16 local_port,
642                               struct mlxsw_sp_port_mapping *port_mapping)
643 {
644         char pmlp_pl[MLXSW_REG_PMLP_LEN];
645         int err;
646
647         mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
648         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
649         if (err)
650                 return err;
651         return mlxsw_sp_port_module_info_parse(mlxsw_sp, local_port,
652                                                pmlp_pl, port_mapping);
653 }
654
/* Program the port's lane-to-module mapping (PMLP register) according to
 * *port_mapping. A module-port reference is taken up front via
 * mlxsw_env_module_port_map() and released again if the register write
 * fails, keeping map/unmap calls balanced.
 */
static int
mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u16 local_port,
                         const struct mlxsw_sp_port_mapping *port_mapping)
{
        char pmlp_pl[MLXSW_REG_PMLP_LEN];
        int i, err;

        mlxsw_env_module_port_map(mlxsw_sp->core, port_mapping->slot_index,
                                  port_mapping->module);

        mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
        mlxsw_reg_pmlp_width_set(pmlp_pl, port_mapping->width);
        for (i = 0; i < port_mapping->width; i++) {
                mlxsw_reg_pmlp_slot_index_set(pmlp_pl, i,
                                              port_mapping->slot_index);
                mlxsw_reg_pmlp_module_set(pmlp_pl, i, port_mapping->module);
                mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, port_mapping->lane + i); /* Rx & Tx */
        }

        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
        if (err)
                goto err_pmlp_write;
        return 0;

err_pmlp_write:
        mlxsw_env_module_port_unmap(mlxsw_sp->core, port_mapping->slot_index,
                                    port_mapping->module);
        return err;
}
684
685 static void mlxsw_sp_port_module_unmap(struct mlxsw_sp *mlxsw_sp, u16 local_port,
686                                        u8 slot_index, u8 module)
687 {
688         char pmlp_pl[MLXSW_REG_PMLP_LEN];
689
690         mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
691         mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
692         mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
693         mlxsw_env_module_port_unmap(mlxsw_sp->core, slot_index, module);
694 }
695
/* ndo_open: take the module "port up" reference, bring the port
 * administratively up and start the Tx queue. On failure the module
 * reference is dropped again; the whole sequence is undone in reverse by
 * mlxsw_sp_port_stop().
 */
static int mlxsw_sp_port_open(struct net_device *dev)
{
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        int err;

        err = mlxsw_env_module_port_up(mlxsw_sp->core,
                                       mlxsw_sp_port->mapping.slot_index,
                                       mlxsw_sp_port->mapping.module);
        if (err)
                return err;
        err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
        if (err)
                goto err_port_admin_status_set;
        netif_start_queue(dev);
        return 0;

err_port_admin_status_set:
        mlxsw_env_module_port_down(mlxsw_sp->core,
                                   mlxsw_sp_port->mapping.slot_index,
                                   mlxsw_sp_port->mapping.module);
        return err;
}
719
720 static int mlxsw_sp_port_stop(struct net_device *dev)
721 {
722         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
723         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
724
725         netif_stop_queue(dev);
726         mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
727         mlxsw_env_module_port_down(mlxsw_sp->core,
728                                    mlxsw_sp_port->mapping.slot_index,
729                                    mlxsw_sp_port->mapping.module);
730         return 0;
731 }
732
/* ndo_start_xmit: pad, prepend a Tx header and hand the skb to the core
 * for transmission. Every path except NETDEV_TX_BUSY consumes the skb:
 * mlxsw_sp_txhdr_handle() frees it on error, and the drop path below frees
 * it explicitly, so NETDEV_TX_OK is returned in all of those cases.
 */
static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
                                      struct net_device *dev)
{
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
        const struct mlxsw_tx_info tx_info = {
                .local_port = mlxsw_sp_port->local_port,
                .is_emad = false,
        };
        u64 len;
        int err;

        memset(skb->cb, 0, sizeof(struct mlxsw_skb_cb));

        if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
                return NETDEV_TX_BUSY;

        /* eth_skb_pad() frees the skb on failure. */
        if (eth_skb_pad(skb)) {
                this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
                return NETDEV_TX_OK;
        }

        err = mlxsw_sp_txhdr_handle(mlxsw_sp->core, mlxsw_sp_port, skb,
                                    &tx_info);
        if (err)
                return NETDEV_TX_OK;

        /* TX header is consumed by HW on the way so we shouldn't count its
         * bytes as being sent. Computed before the skb is handed to the
         * core, after which it must not be touched.
         */
        len = skb->len - MLXSW_TXHDR_LEN;

        /* Due to a race we might fail here because of a full queue. In that
         * unlikely case we simply drop the packet.
         */
        err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);

        if (!err) {
                pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
                u64_stats_update_begin(&pcpu_stats->syncp);
                pcpu_stats->tx_packets++;
                pcpu_stats->tx_bytes += len;
                u64_stats_update_end(&pcpu_stats->syncp);
        } else {
                this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
                dev_kfree_skb_any(skb);
        }
        return NETDEV_TX_OK;
}
783
/* ndo_set_rx_mode: intentionally empty. NOTE(review): presumably no action
 * is needed because filtering is handled by the switch pipeline — confirm
 * against the netdev ops registration.
 */
static void mlxsw_sp_set_rx_mode(struct net_device *dev)
{
}
787
788 static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
789 {
790         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
791         struct sockaddr *addr = p;
792         int err;
793
794         if (!is_valid_ether_addr(addr->sa_data))
795                 return -EADDRNOTAVAIL;
796
797         err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
798         if (err)
799                 return err;
800         eth_hw_addr_set(dev, addr->sa_data);
801         return 0;
802 }
803
/* ndo_change_mtu: resize the port's headroom buffers for the new MTU first,
 * then program the MTU itself. If the MTU write fails, the original
 * headroom configuration is restored so the port is left unchanged.
 */
static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
{
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
        struct mlxsw_sp_hdroom orig_hdroom;
        struct mlxsw_sp_hdroom hdroom;
        int err;

        /* Snapshot the current headroom for rollback. */
        orig_hdroom = *mlxsw_sp_port->hdroom;

        hdroom = orig_hdroom;
        hdroom.mtu = mtu;
        mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);

        err = mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
        if (err) {
                netdev_err(dev, "Failed to configure port's headroom\n");
                return err;
        }

        err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
        if (err)
                goto err_port_mtu_set;
        dev->mtu = mtu;
        return 0;

err_port_mtu_set:
        mlxsw_sp_hdroom_configure(mlxsw_sp_port, &orig_hdroom);
        return err;
}
833
/* Accumulate the per-CPU software (CPU path) counters of the port into
 * *stats. The u64 counters are read under the u64_stats seqcount retry
 * loop so torn reads on 32-bit hosts are retried; tx_dropped is a plain
 * u32 updated without that protection and is summed as-is.
 */
static int
mlxsw_sp_port_get_sw_stats64(const struct net_device *dev,
                             struct rtnl_link_stats64 *stats)
{
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
        struct mlxsw_sp_port_pcpu_stats *p;
        u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
        u32 tx_dropped = 0;
        unsigned int start;
        int i;

        for_each_possible_cpu(i) {
                p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
                do {
                        start = u64_stats_fetch_begin(&p->syncp);
                        rx_packets      = p->rx_packets;
                        rx_bytes        = p->rx_bytes;
                        tx_packets      = p->tx_packets;
                        tx_bytes        = p->tx_bytes;
                } while (u64_stats_fetch_retry(&p->syncp, start));

                stats->rx_packets       += rx_packets;
                stats->rx_bytes         += rx_bytes;
                stats->tx_packets       += tx_packets;
                stats->tx_bytes         += tx_bytes;
                /* tx_dropped is u32, updated without syncp protection. */
                tx_dropped      += p->tx_dropped;
        }
        stats->tx_dropped       = tx_dropped;
        return 0;
}
865
/* ndo_has_offload_stats handler: only CPU-hit statistics are supported. */
static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev, int attr_id)
{
        return attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT;
}
875
876 static int mlxsw_sp_port_get_offload_stats(int attr_id, const struct net_device *dev,
877                                            void *sp)
878 {
879         switch (attr_id) {
880         case IFLA_OFFLOAD_XSTATS_CPU_HIT:
881                 return mlxsw_sp_port_get_sw_stats64(dev, sp);
882         }
883
884         return -EINVAL;
885 }
886
/* Query the PPCNT register for the port behind @dev and return the raw
 * payload in @ppcnt_pl. @grp selects the counter group; @prio selects the
 * priority/TC index for per-priority groups.
 */
int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp,
                                int prio, char *ppcnt_pl)
{
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

        mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
        return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
}
896
897 static int mlxsw_sp_port_get_hw_stats(struct net_device *dev,
898                                       struct rtnl_link_stats64 *stats)
899 {
900         char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
901         int err;
902
903         err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT,
904                                           0, ppcnt_pl);
905         if (err)
906                 goto out;
907
908         stats->tx_packets =
909                 mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl);
910         stats->rx_packets =
911                 mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl);
912         stats->tx_bytes =
913                 mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl);
914         stats->rx_bytes =
915                 mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl);
916         stats->multicast =
917                 mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl);
918
919         stats->rx_crc_errors =
920                 mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl);
921         stats->rx_frame_errors =
922                 mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl);
923
924         stats->rx_length_errors = (
925                 mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) +
926                 mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) +
927                 mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl));
928
929         stats->rx_errors = (stats->rx_crc_errors +
930                 stats->rx_frame_errors + stats->rx_length_errors);
931
932 out:
933         return err;
934 }
935
/* Collect extended hardware statistics: ECN marks, per-TC WRED drops and
 * congestion counters, per-TC queue backlog/tail-drop counters, and
 * per-priority TX counters. Failures on individual counter groups are
 * skipped so the remaining groups are still read.
 */
static void
mlxsw_sp_port_get_hw_xstats(struct net_device *dev,
                            struct mlxsw_sp_port_xstats *xstats)
{
        char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
        int err, i;

        err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_EXT_CNT, 0,
                                          ppcnt_pl);
        if (!err)
                xstats->ecn = mlxsw_reg_ppcnt_ecn_marked_get(ppcnt_pl);

        for (i = 0; i < TC_MAX_QUEUE; i++) {
                err = mlxsw_sp_port_get_stats_raw(dev,
                                                  MLXSW_REG_PPCNT_TC_CONG_CNT,
                                                  i, ppcnt_pl);
                if (err)
                        goto tc_cnt;

                xstats->wred_drop[i] =
                        mlxsw_reg_ppcnt_wred_discard_get(ppcnt_pl);
                xstats->tc_ecn[i] = mlxsw_reg_ppcnt_ecn_marked_tc_get(ppcnt_pl);

                /* The TC counters below are read even if the congestion
                 * counter group above failed.
                 */
tc_cnt:
                err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_TC_CNT,
                                                  i, ppcnt_pl);
                if (err)
                        continue;

                xstats->backlog[i] =
                        mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl);
                xstats->tail_drop[i] =
                        mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get(ppcnt_pl);
        }

        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
                err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_PRIO_CNT,
                                                  i, ppcnt_pl);
                if (err)
                        continue;

                xstats->tx_packets[i] = mlxsw_reg_ppcnt_tx_frames_get(ppcnt_pl);
                xstats->tx_bytes[i] = mlxsw_reg_ppcnt_tx_octets_get(ppcnt_pl);
        }
}
981
982 static void update_stats_cache(struct work_struct *work)
983 {
984         struct mlxsw_sp_port *mlxsw_sp_port =
985                 container_of(work, struct mlxsw_sp_port,
986                              periodic_hw_stats.update_dw.work);
987
988         if (!netif_carrier_ok(mlxsw_sp_port->dev))
989                 /* Note: mlxsw_sp_port_down_wipe_counters() clears the cache as
990                  * necessary when port goes down.
991                  */
992                 goto out;
993
994         mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev,
995                                    &mlxsw_sp_port->periodic_hw_stats.stats);
996         mlxsw_sp_port_get_hw_xstats(mlxsw_sp_port->dev,
997                                     &mlxsw_sp_port->periodic_hw_stats.xstats);
998
999 out:
1000         mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw,
1001                                MLXSW_HW_STATS_UPDATE_TIME);
1002 }
1003
1004 /* Return the stats from a cache that is updated periodically,
1005  * as this function might get called in an atomic context.
1006  */
1007 static void
1008 mlxsw_sp_port_get_stats64(struct net_device *dev,
1009                           struct rtnl_link_stats64 *stats)
1010 {
1011         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1012
1013         memcpy(stats, &mlxsw_sp_port->periodic_hw_stats.stats, sizeof(*stats));
1014 }
1015
/* Program VLAN membership for [@vid_begin, @vid_end] on the port via the
 * SPVM register. The register payload is heap-allocated; the caller is
 * expected to keep the range within MLXSW_REG_SPVM_REC_MAX_COUNT records
 * (see mlxsw_sp_port_vlan_set()).
 */
static int __mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
                                    u16 vid_begin, u16 vid_end,
                                    bool is_member, bool untagged)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char *spvm_pl;
        int err;

        spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
        if (!spvm_pl)
                return -ENOMEM;

        mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
                            vid_end, is_member, untagged);
        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
        kfree(spvm_pl);
        return err;
}
1034
/* Set VLAN membership for the [@vid_begin, @vid_end] range, batching the
 * SPVM writes in chunks of at most MLXSW_REG_SPVM_REC_MAX_COUNT VIDs.
 * Stops and returns the error of the first failing chunk.
 */
int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
                           u16 vid_end, bool is_member, bool untagged)
{
        u16 vid, vid_e;
        int err;

        for (vid = vid_begin; vid <= vid_end;
             vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
                vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
                            vid_end);

                err = __mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
                                               is_member, untagged);
                if (err)
                        return err;
        }

        return 0;
}
1054
/* Destroy all {Port, VID}s on the port. The default VID is kept unless
 * @flush_default is set.
 */
static void mlxsw_sp_port_vlan_flush(struct mlxsw_sp_port *mlxsw_sp_port,
                                     bool flush_default)
{
        struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, *tmp;

        /* _safe iteration: destroy unlinks entries from vlans_list. */
        list_for_each_entry_safe(mlxsw_sp_port_vlan, tmp,
                                 &mlxsw_sp_port->vlans_list, list) {
                if (!flush_default &&
                    mlxsw_sp_port_vlan->vid == MLXSW_SP_DEFAULT_VID)
                        continue;
                mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
        }
}
1068
1069 static void
1070 mlxsw_sp_port_vlan_cleanup(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
1071 {
1072         if (mlxsw_sp_port_vlan->bridge_port)
1073                 mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
1074         else if (mlxsw_sp_port_vlan->fid)
1075                 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
1076 }
1077
/* Create a {Port, VID} and add it to the port's VLAN list. The VID is
 * first installed in the device's VLAN filter (untagged only for the
 * default VID); on allocation failure it is removed again. Returns
 * ERR_PTR(-EEXIST) if the VID already exists on the port.
 */
struct mlxsw_sp_port_vlan *
mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
        struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
        bool untagged = vid == MLXSW_SP_DEFAULT_VID;
        int err;

        mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
        if (mlxsw_sp_port_vlan)
                return ERR_PTR(-EEXIST);

        err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, untagged);
        if (err)
                return ERR_PTR(err);

        mlxsw_sp_port_vlan = kzalloc(sizeof(*mlxsw_sp_port_vlan), GFP_KERNEL);
        if (!mlxsw_sp_port_vlan) {
                err = -ENOMEM;
                goto err_port_vlan_alloc;
        }

        mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port;
        mlxsw_sp_port_vlan->vid = vid;
        list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list);

        return mlxsw_sp_port_vlan;

err_port_vlan_alloc:
        /* Undo the VLAN filter installation done above. */
        mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
        return ERR_PTR(err);
}
1109
/* Destroy a {Port, VID}: detach it from its bridge port or router FID,
 * unlink and free it, then remove the VID from the device's VLAN filter.
 */
void mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
        struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
        u16 vid = mlxsw_sp_port_vlan->vid;

        mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port_vlan);
        list_del(&mlxsw_sp_port_vlan->list);
        kfree(mlxsw_sp_port_vlan);
        mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
}
1120
1121 static int mlxsw_sp_port_add_vid(struct net_device *dev,
1122                                  __be16 __always_unused proto, u16 vid)
1123 {
1124         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1125
1126         /* VLAN 0 is added to HW filter when device goes up, but it is
1127          * reserved in our case, so simply return.
1128          */
1129         if (!vid)
1130                 return 0;
1131
1132         return PTR_ERR_OR_ZERO(mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid));
1133 }
1134
1135 static int mlxsw_sp_port_kill_vid(struct net_device *dev,
1136                                   __be16 __always_unused proto, u16 vid)
1137 {
1138         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1139         struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1140
1141         /* VLAN 0 is removed from HW filter when device goes down, but
1142          * it is reserved in our case, so simply return.
1143          */
1144         if (!vid)
1145                 return 0;
1146
1147         mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
1148         if (!mlxsw_sp_port_vlan)
1149                 return 0;
1150         mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
1151
1152         return 0;
1153 }
1154
/* Dispatch a flow block bind/unbind request by binder type: clsact
 * ingress/egress share one handler (distinguished by the bool), RED
 * early-drop and mark qevents have dedicated handlers.
 */
static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
                                   struct flow_block_offload *f)
{
        switch (f->binder_type) {
        case FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS:
                return mlxsw_sp_setup_tc_block_clsact(mlxsw_sp_port, f, true);
        case FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS:
                return mlxsw_sp_setup_tc_block_clsact(mlxsw_sp_port, f, false);
        case FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP:
                return mlxsw_sp_setup_tc_block_qevent_early_drop(mlxsw_sp_port, f);
        case FLOW_BLOCK_BINDER_TYPE_RED_MARK:
                return mlxsw_sp_setup_tc_block_qevent_mark(mlxsw_sp_port, f);
        default:
                return -EOPNOTSUPP;
        }
}
1171
/* ndo_setup_tc handler: dispatch TC offload setup requests to the
 * per-type handlers (flow blocks and the supported qdiscs).
 */
static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type,
                             void *type_data)
{
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

        switch (type) {
        case TC_SETUP_BLOCK:
                return mlxsw_sp_setup_tc_block(mlxsw_sp_port, type_data);
        case TC_SETUP_QDISC_RED:
                return mlxsw_sp_setup_tc_red(mlxsw_sp_port, type_data);
        case TC_SETUP_QDISC_PRIO:
                return mlxsw_sp_setup_tc_prio(mlxsw_sp_port, type_data);
        case TC_SETUP_QDISC_ETS:
                return mlxsw_sp_setup_tc_ets(mlxsw_sp_port, type_data);
        case TC_SETUP_QDISC_TBF:
                return mlxsw_sp_setup_tc_tbf(mlxsw_sp_port, type_data);
        case TC_SETUP_QDISC_FIFO:
                return mlxsw_sp_setup_tc_fifo(mlxsw_sp_port, type_data);
        default:
                return -EOPNOTSUPP;
        }
}
1194
1195 static int mlxsw_sp_feature_hw_tc(struct net_device *dev, bool enable)
1196 {
1197         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1198
1199         if (!enable) {
1200                 if (mlxsw_sp_flow_block_rule_count(mlxsw_sp_port->ing_flow_block) ||
1201                     mlxsw_sp_flow_block_rule_count(mlxsw_sp_port->eg_flow_block)) {
1202                         netdev_err(dev, "Active offloaded tc filters, can't turn hw_tc_offload off\n");
1203                         return -EINVAL;
1204                 }
1205                 mlxsw_sp_flow_block_disable_inc(mlxsw_sp_port->ing_flow_block);
1206                 mlxsw_sp_flow_block_disable_inc(mlxsw_sp_port->eg_flow_block);
1207         } else {
1208                 mlxsw_sp_flow_block_disable_dec(mlxsw_sp_port->ing_flow_block);
1209                 mlxsw_sp_flow_block_disable_dec(mlxsw_sp_port->eg_flow_block);
1210         }
1211         return 0;
1212 }
1213
/* Toggle physical-layer loopback via the PPLR register. The port is
 * administratively taken down for the duration of the change and brought
 * back up afterwards if the netdev was running.
 */
static int mlxsw_sp_feature_loopback(struct net_device *dev, bool enable)
{
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
        char pplr_pl[MLXSW_REG_PPLR_LEN];
        int err;

        if (netif_running(dev))
                mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);

        mlxsw_reg_pplr_pack(pplr_pl, mlxsw_sp_port->local_port, enable);
        err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pplr),
                              pplr_pl);

        /* Re-enable the port even if the register write failed. */
        if (netif_running(dev))
                mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);

        return err;
}
1232
1233 typedef int (*mlxsw_sp_feature_handler)(struct net_device *dev, bool enable);
1234
1235 static int mlxsw_sp_handle_feature(struct net_device *dev,
1236                                    netdev_features_t wanted_features,
1237                                    netdev_features_t feature,
1238                                    mlxsw_sp_feature_handler feature_handler)
1239 {
1240         netdev_features_t changes = wanted_features ^ dev->features;
1241         bool enable = !!(wanted_features & feature);
1242         int err;
1243
1244         if (!(changes & feature))
1245                 return 0;
1246
1247         err = feature_handler(dev, enable);
1248         if (err) {
1249                 netdev_err(dev, "%s feature %pNF failed, err %d\n",
1250                            enable ? "Enable" : "Disable", &feature, err);
1251                 return err;
1252         }
1253
1254         if (enable)
1255                 dev->features |= feature;
1256         else
1257                 dev->features &= ~feature;
1258
1259         return 0;
1260 }
1261 static int mlxsw_sp_set_features(struct net_device *dev,
1262                                  netdev_features_t features)
1263 {
1264         netdev_features_t oper_features = dev->features;
1265         int err = 0;
1266
1267         err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_HW_TC,
1268                                        mlxsw_sp_feature_hw_tc);
1269         err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_LOOPBACK,
1270                                        mlxsw_sp_feature_loopback);
1271
1272         if (err) {
1273                 dev->features = oper_features;
1274                 return -EINVAL;
1275         }
1276
1277         return 0;
1278 }
1279
/* SIOCSHWTSTAMP handler: copy the requested config from user space, apply
 * it through the ASIC-specific PTP ops (which may adjust it), then copy
 * the actual configuration back to user space.
 */
static int mlxsw_sp_port_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
                                      struct ifreq *ifr)
{
        struct hwtstamp_config config;
        int err;

        if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
                return -EFAULT;

        err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port,
                                                             &config);
        if (err)
                return err;

        if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
                return -EFAULT;

        return 0;
}
1299
/* SIOCGHWTSTAMP handler: read the current timestamping configuration via
 * the ASIC-specific PTP ops and copy it to user space.
 */
static int mlxsw_sp_port_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
                                      struct ifreq *ifr)
{
        struct hwtstamp_config config;
        int err;

        err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_get(mlxsw_sp_port,
                                                             &config);
        if (err)
                return err;

        if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
                return -EFAULT;

        return 0;
}
1316
1317 static inline void mlxsw_sp_port_ptp_clear(struct mlxsw_sp_port *mlxsw_sp_port)
1318 {
1319         struct hwtstamp_config config = {0};
1320
1321         mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port, &config);
1322 }
1323
/* ndo_eth_ioctl handler: only the HW timestamping ioctls are supported. */
static int
mlxsw_sp_port_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

        switch (cmd) {
        case SIOCSHWTSTAMP:
                return mlxsw_sp_port_hwtstamp_set(mlxsw_sp_port, ifr);
        case SIOCGHWTSTAMP:
                return mlxsw_sp_port_hwtstamp_get(mlxsw_sp_port, ifr);
        default:
                return -EOPNOTSUPP;
        }
}
1338
/* Netdev callbacks for Spectrum ports. */
static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
        .ndo_open               = mlxsw_sp_port_open,
        .ndo_stop               = mlxsw_sp_port_stop,
        .ndo_start_xmit         = mlxsw_sp_port_xmit,
        .ndo_setup_tc           = mlxsw_sp_setup_tc,
        .ndo_set_rx_mode        = mlxsw_sp_set_rx_mode,
        .ndo_set_mac_address    = mlxsw_sp_port_set_mac_address,
        .ndo_change_mtu         = mlxsw_sp_port_change_mtu,
        .ndo_get_stats64        = mlxsw_sp_port_get_stats64,
        .ndo_has_offload_stats  = mlxsw_sp_port_has_offload_stats,
        .ndo_get_offload_stats  = mlxsw_sp_port_get_offload_stats,
        .ndo_vlan_rx_add_vid    = mlxsw_sp_port_add_vid,
        .ndo_vlan_rx_kill_vid   = mlxsw_sp_port_kill_vid,
        .ndo_set_features       = mlxsw_sp_set_features,
        .ndo_eth_ioctl          = mlxsw_sp_port_ioctl,
};
1355
/* Program the port's advertised speeds (PTYS) as the intersection of the
 * device's reported capabilities and the speeds the driver supports,
 * preserving the port's autoneg setting.
 */
static int
mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        u32 eth_proto_cap, eth_proto_admin, eth_proto_oper;
        const struct mlxsw_sp_port_type_speed_ops *ops;
        char ptys_pl[MLXSW_REG_PTYS_LEN];
        u32 eth_proto_cap_masked;
        int err;

        ops = mlxsw_sp->port_type_speed_ops;

        /* Set advertised speeds to speeds supported by both the driver
         * and the device.
         */
        ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
                               0, false);
        err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
        if (err)
                return err;

        ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, &eth_proto_cap,
                                 &eth_proto_admin, &eth_proto_oper);
        eth_proto_cap_masked = ops->ptys_proto_cap_masked_get(eth_proto_cap);
        ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
                               eth_proto_cap_masked,
                               mlxsw_sp_port->link.autoneg);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
}
1385
/* Read the port's current operational speed from PTYS into @speed. */
int mlxsw_sp_port_speed_get(struct mlxsw_sp_port *mlxsw_sp_port, u32 *speed)
{
        const struct mlxsw_sp_port_type_speed_ops *port_type_speed_ops;
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char ptys_pl[MLXSW_REG_PTYS_LEN];
        u32 eth_proto_oper;
        int err;

        port_type_speed_ops = mlxsw_sp->port_type_speed_ops;
        port_type_speed_ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl,
                                               mlxsw_sp_port->local_port, 0,
                                               false);
        err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
        if (err)
                return err;
        port_type_speed_ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, NULL, NULL,
                                                 &eth_proto_oper);
        *speed = port_type_speed_ops->from_ptys_speed(mlxsw_sp, eth_proto_oper);
        return 0;
}
1406
/* Configure one ETS scheduling element (QEEC register): hierarchy level
 * @hr, element @index linked to @next_index one level up, with DWRR
 * enabled/weighted as requested.
 */
int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
                          enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
                          bool dwrr, u8 dwrr_weight)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char qeec_pl[MLXSW_REG_QEEC_LEN];

        mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
                            next_index);
        mlxsw_reg_qeec_de_set(qeec_pl, true);
        mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
        mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}
1421
/* Configure the max shaper of an ETS element (QEEC): rate @maxrate with
 * burst size @burst_size.
 */
int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
                                  enum mlxsw_reg_qeec_hr hr, u8 index,
                                  u8 next_index, u32 maxrate, u8 burst_size)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char qeec_pl[MLXSW_REG_QEEC_LEN];

        mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
                            next_index);
        mlxsw_reg_qeec_mase_set(qeec_pl, true);
        mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
        mlxsw_reg_qeec_max_shaper_bs_set(qeec_pl, burst_size);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}
1436
/* Configure the min shaper of an ETS element (QEEC) to @minrate. */
static int mlxsw_sp_port_min_bw_set(struct mlxsw_sp_port *mlxsw_sp_port,
                                    enum mlxsw_reg_qeec_hr hr, u8 index,
                                    u8 next_index, u32 minrate)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char qeec_pl[MLXSW_REG_QEEC_LEN];

        mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
                            next_index);
        mlxsw_reg_qeec_mise_set(qeec_pl, true);
        mlxsw_reg_qeec_min_shaper_rate_set(qeec_pl, minrate);

        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}
1451
/* Map switch priority @switch_prio to traffic class @tclass (QTCT). */
int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
                              u8 switch_prio, u8 tclass)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char qtct_pl[MLXSW_REG_QTCT_LEN];

        mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
                            tclass);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
}
1462
/* Initial ETS configuration for a port: build the scheduling element
 * hierarchy, disable all max shapers, set the min shaper of the multicast
 * TCs and map every priority to traffic class 0.
 */
static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
        int err, i;

        /* Setup the elements hierarchy, so that each TC is linked to
         * one subgroup, which are all members of the same group.
         */
        err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
                                    MLXSW_REG_QEEC_HR_GROUP, 0, 0, false, 0);
        if (err)
                return err;
        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
                err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
                                            MLXSW_REG_QEEC_HR_SUBGROUP, i,
                                            0, false, 0);
                if (err)
                        return err;
        }
        /* TCs 0..7 are the unicast TCs, TCs 8..15 (i + 8) the corresponding
         * multicast TCs; both members of subgroup i.
         */
        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
                err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
                                            MLXSW_REG_QEEC_HR_TC, i, i,
                                            false, 0);
                if (err)
                        return err;

                err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
                                            MLXSW_REG_QEEC_HR_TC,
                                            i + 8, i,
                                            true, 100);
                if (err)
                        return err;
        }

        /* Make sure the max shaper is disabled in all hierarchies that support
         * it. Note that this disables ptps (PTP shaper), but that is intended
         * for the initial configuration.
         */
        err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
                                            MLXSW_REG_QEEC_HR_PORT, 0, 0,
                                            MLXSW_REG_QEEC_MAS_DIS, 0);
        if (err)
                return err;
        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
                err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
                                                    MLXSW_REG_QEEC_HR_SUBGROUP,
                                                    i, 0,
                                                    MLXSW_REG_QEEC_MAS_DIS, 0);
                if (err)
                        return err;
        }
        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
                err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
                                                    MLXSW_REG_QEEC_HR_TC,
                                                    i, i,
                                                    MLXSW_REG_QEEC_MAS_DIS, 0);
                if (err)
                        return err;

                err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
                                                    MLXSW_REG_QEEC_HR_TC,
                                                    i + 8, i,
                                                    MLXSW_REG_QEEC_MAS_DIS, 0);
                if (err)
                        return err;
        }

        /* Configure the min shaper for multicast TCs. */
        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
                err = mlxsw_sp_port_min_bw_set(mlxsw_sp_port,
                                               MLXSW_REG_QEEC_HR_TC,
                                               i + 8, i,
                                               MLXSW_REG_QEEC_MIS_MIN);
                if (err)
                        return err;
        }

        /* Map all priorities to traffic class 0. */
        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
                err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
                if (err)
                        return err;
        }

        return 0;
}
1548
/* Enable/disable multicast-aware TC mapping for the port (QTCTM). */
static int mlxsw_sp_port_tc_mc_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
                                        bool enable)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char qtctm_pl[MLXSW_REG_QTCTM_LEN];

        mlxsw_reg_qtctm_pack(qtctm_pl, mlxsw_sp_port->local_port, enable);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtctm), qtctm_pl);
}
1558
/* Snapshot the port module's overheat counter so that later readings can
 * be reported relative to this initial value.
 */
static int mlxsw_sp_port_overheat_init_val_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        u8 slot_index = mlxsw_sp_port->mapping.slot_index;
        u8 module = mlxsw_sp_port->mapping.module;
        u64 overheat_counter;
        int err;

        err = mlxsw_env_module_overheat_counter_get(mlxsw_sp->core, slot_index,
                                                    module, &overheat_counter);
        if (err)
                return err;

        mlxsw_sp_port->module_overheat_initial_val = overheat_counter;
        return 0;
}
1575
/* Configure which VLAN tag types (802.1ad and/or 802.1q) the port
 * classifies as tagged, via the SPVC register.
 */
int
mlxsw_sp_port_vlan_classification_set(struct mlxsw_sp_port *mlxsw_sp_port,
                                      bool is_8021ad_tagged,
                                      bool is_8021q_tagged)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char spvc_pl[MLXSW_REG_SPVC_LEN];

        mlxsw_reg_spvc_pack(spvc_pl, mlxsw_sp_port->local_port,
                            is_8021ad_tagged, is_8021q_tagged);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvc), spvc_pl);
}
1588
/* Query PLLP for the front-panel label of @local_port: label port number,
 * split sub-port number and slot index.
 */
static int mlxsw_sp_port_label_info_get(struct mlxsw_sp *mlxsw_sp,
                                        u16 local_port, u8 *port_number,
                                        u8 *split_port_subnumber,
                                        u8 *slot_index)
{
        char pllp_pl[MLXSW_REG_PLLP_LEN];
        int err;

        mlxsw_reg_pllp_pack(pllp_pl, local_port);
        err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pllp), pllp_pl);
        if (err)
                return err;
        mlxsw_reg_pllp_unpack(pllp_pl, port_number,
                              split_port_subnumber, slot_index);
        return 0;
}
1605
/* Create and register a netdev for front-panel port 'local_port'.
 *
 * The port is fully initialized in hardware (module mapping, SWID,
 * speeds, MTU, buffers, ETS, DCB, FIDs, qdiscs, VLANs, NVE, PVID) before
 * the netdev is registered, so user space never observes a
 * half-initialized port.  The error labels unwind in exact reverse order
 * of the initialization sequence - keep both in sync when adding steps.
 *
 * @split: the port is the product of a port split.
 * @port_mapping: module / lane assignment; copied into the port struct.
 */
static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u16 local_port,
				bool split,
				struct mlxsw_sp_port_mapping *port_mapping)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_port *mlxsw_sp_port;
	u32 lanes = port_mapping->width;
	u8 split_port_subnumber;
	struct net_device *dev;
	u8 port_number;
	u8 slot_index;
	bool splittable;
	int err;

	err = mlxsw_sp_port_module_map(mlxsw_sp, local_port, port_mapping);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to map module\n",
			local_port);
		return err;
	}

	err = mlxsw_sp_port_swid_set(mlxsw_sp, local_port, 0);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
			local_port);
		goto err_port_swid_set;
	}

	err = mlxsw_sp_port_label_info_get(mlxsw_sp, local_port, &port_number,
					   &split_port_subnumber, &slot_index);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get port label information\n",
			local_port);
		goto err_port_label_info_get;
	}

	/* A port is splittable only if it occupies more than one lane and is
	 * not itself the result of a split.
	 */
	splittable = lanes > 1 && !split;
	err = mlxsw_core_port_init(mlxsw_sp->core, local_port, slot_index,
				   port_number, split, split_port_subnumber,
				   splittable, lanes, mlxsw_sp->base_mac,
				   sizeof(mlxsw_sp->base_mac));
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
			local_port);
		goto err_core_port_init;
	}

	dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
	if (!dev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}
	SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev);
	dev_net_set(dev, mlxsw_sp_net(mlxsw_sp));
	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_core_port_netdev_link(mlxsw_sp->core, local_port,
				    mlxsw_sp_port, dev);
	mlxsw_sp_port->dev = dev;
	mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
	mlxsw_sp_port->local_port = local_port;
	mlxsw_sp_port->pvid = MLXSW_SP_DEFAULT_VID;
	mlxsw_sp_port->split = split;
	mlxsw_sp_port->mapping = *port_mapping;
	mlxsw_sp_port->link.autoneg = 1;
	INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list);

	mlxsw_sp_port->pcpu_stats =
		netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
	if (!mlxsw_sp_port->pcpu_stats) {
		err = -ENOMEM;
		goto err_alloc_stats;
	}

	/* Periodic statistics refresh; first scheduled right before return. */
	INIT_DELAYED_WORK(&mlxsw_sp_port->periodic_hw_stats.update_dw,
			  &update_stats_cache);

	dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
	dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;

	err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
			mlxsw_sp_port->local_port);
		goto err_dev_addr_init;
	}

	netif_carrier_off(dev);

	dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
			 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
	dev->hw_features |= NETIF_F_HW_TC | NETIF_F_LOOPBACK;

	dev->min_mtu = 0;
	dev->max_mtu = ETH_MAX_MTU;

	/* Each packet needs to have a Tx header (metadata) on top all other
	 * headers.
	 */
	dev->needed_headroom = MLXSW_TXHDR_LEN;

	err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
			mlxsw_sp_port->local_port);
		goto err_port_system_port_mapping_set;
	}

	err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
			mlxsw_sp_port->local_port);
		goto err_port_speed_by_width_set;
	}

	err = mlxsw_sp->port_type_speed_ops->ptys_max_speed(mlxsw_sp_port,
							    &mlxsw_sp_port->max_speed);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get maximum speed\n",
			mlxsw_sp_port->local_port);
		goto err_max_speed_get;
	}

	err = mlxsw_sp_port_max_mtu_get(mlxsw_sp_port, &mlxsw_sp_port->max_mtu);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get maximum MTU\n",
			mlxsw_sp_port->local_port);
		goto err_port_max_mtu_get;
	}

	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
			mlxsw_sp_port->local_port);
		goto err_port_mtu_set;
	}

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	if (err)
		goto err_port_admin_status_set;

	err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
			mlxsw_sp_port->local_port);
		goto err_port_buffers_init;
	}

	err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
			mlxsw_sp_port->local_port);
		goto err_port_ets_init;
	}

	err = mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, true);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC MC mode\n",
			mlxsw_sp_port->local_port);
		goto err_port_tc_mc_mode;
	}

	/* ETS and buffers must be initialized before DCB. */
	err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
			mlxsw_sp_port->local_port);
		goto err_port_dcb_init;
	}

	err = mlxsw_sp_port_fids_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize FIDs\n",
			mlxsw_sp_port->local_port);
		goto err_port_fids_init;
	}

	err = mlxsw_sp_tc_qdisc_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC qdiscs\n",
			mlxsw_sp_port->local_port);
		goto err_port_qdiscs_init;
	}

	/* Disable VLAN filtering for all VIDs except the default one. */
	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 0, VLAN_N_VID - 1, false,
				     false);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to clear VLAN filter\n",
			mlxsw_sp_port->local_port);
		goto err_port_vlan_clear;
	}

	err = mlxsw_sp_port_nve_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize NVE\n",
			mlxsw_sp_port->local_port);
		goto err_port_nve_init;
	}

	err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID,
				     ETH_P_8021Q);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set PVID\n",
			mlxsw_sp_port->local_port);
		goto err_port_pvid_set;
	}

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_create(mlxsw_sp_port,
						       MLXSW_SP_DEFAULT_VID);
	if (IS_ERR(mlxsw_sp_port_vlan)) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n",
			mlxsw_sp_port->local_port);
		err = PTR_ERR(mlxsw_sp_port_vlan);
		goto err_port_vlan_create;
	}
	mlxsw_sp_port->default_vlan = mlxsw_sp_port_vlan;

	/* Set SPVC.et0=true and SPVC.et1=false to make the local port to treat
	 * only packets with 802.1q header as tagged packets.
	 */
	err = mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, false, true);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set default VLAN classification\n",
			local_port);
		goto err_port_vlan_classification_set;
	}

	INIT_DELAYED_WORK(&mlxsw_sp_port->ptp.shaper_dw,
			  mlxsw_sp->ptp_ops->shaper_work);

	/* From this point on, lookups via mlxsw_sp->ports[] can find the
	 * port.
	 */
	mlxsw_sp->ports[local_port] = mlxsw_sp_port;

	err = mlxsw_sp_port_overheat_init_val_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set overheat initial value\n",
			mlxsw_sp_port->local_port);
		goto err_port_overheat_init_val_set;
	}

	err = register_netdev(dev);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
			mlxsw_sp_port->local_port);
		goto err_register_netdev;
	}

	mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw, 0);
	return 0;

/* Error unwind: mirrors the init sequence above in reverse order. */
err_register_netdev:
err_port_overheat_init_val_set:
	mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, true, true);
err_port_vlan_classification_set:
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
err_port_vlan_create:
err_port_pvid_set:
	mlxsw_sp_port_nve_fini(mlxsw_sp_port);
err_port_nve_init:
err_port_vlan_clear:
	mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
err_port_qdiscs_init:
	mlxsw_sp_port_fids_fini(mlxsw_sp_port);
err_port_fids_init:
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
err_port_dcb_init:
	mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
err_port_tc_mc_mode:
err_port_ets_init:
	mlxsw_sp_port_buffers_fini(mlxsw_sp_port);
err_port_buffers_init:
err_port_admin_status_set:
err_port_mtu_set:
err_port_max_mtu_get:
err_max_speed_get:
err_port_speed_by_width_set:
err_port_system_port_mapping_set:
err_dev_addr_init:
	free_percpu(mlxsw_sp_port->pcpu_stats);
err_alloc_stats:
	free_netdev(dev);
err_alloc_etherdev:
	mlxsw_core_port_fini(mlxsw_sp->core, local_port);
err_core_port_init:
err_port_label_info_get:
	mlxsw_sp_port_swid_set(mlxsw_sp, local_port,
			       MLXSW_PORT_SWID_DISABLED_PORT);
err_port_swid_set:
	mlxsw_sp_port_module_unmap(mlxsw_sp, local_port,
				   port_mapping->slot_index,
				   port_mapping->module);
	return err;
}
1898
/* Tear down a port created by mlxsw_sp_port_create(), in reverse order of
 * creation.  The mapping details are sampled up front because they are
 * still needed for the final module unmap after the netdev is freed.
 */
static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u16 local_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
	u8 slot_index = mlxsw_sp_port->mapping.slot_index;
	u8 module = mlxsw_sp_port->mapping.module;

	cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw);
	cancel_delayed_work_sync(&mlxsw_sp_port->ptp.shaper_dw);
	unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
	mlxsw_sp_port_ptp_clear(mlxsw_sp_port);
	/* Treat both 802.1q and 802.1ad packets as tagged again; mirrors the
	 * error path of mlxsw_sp_port_create().
	 */
	mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, true, true);
	/* Hide the port from lookups before dismantling it. */
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_sp_port_vlan_flush(mlxsw_sp_port, true);
	mlxsw_sp_port_nve_fini(mlxsw_sp_port);
	mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
	mlxsw_sp_port_fids_fini(mlxsw_sp_port);
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
	mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
	mlxsw_sp_port_buffers_fini(mlxsw_sp_port);
	free_percpu(mlxsw_sp_port->pcpu_stats);
	/* The VLAN flush above should have emptied the list. */
	WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list));
	free_netdev(mlxsw_sp_port->dev);
	mlxsw_core_port_fini(mlxsw_sp->core, local_port);
	mlxsw_sp_port_swid_set(mlxsw_sp, local_port,
			       MLXSW_PORT_SWID_DISABLED_PORT);
	mlxsw_sp_port_module_unmap(mlxsw_sp, local_port, slot_index, module);
}
1926
1927 static int mlxsw_sp_cpu_port_create(struct mlxsw_sp *mlxsw_sp)
1928 {
1929         struct mlxsw_sp_port *mlxsw_sp_port;
1930         int err;
1931
1932         mlxsw_sp_port = kzalloc(sizeof(*mlxsw_sp_port), GFP_KERNEL);
1933         if (!mlxsw_sp_port)
1934                 return -ENOMEM;
1935
1936         mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
1937         mlxsw_sp_port->local_port = MLXSW_PORT_CPU_PORT;
1938
1939         err = mlxsw_core_cpu_port_init(mlxsw_sp->core,
1940                                        mlxsw_sp_port,
1941                                        mlxsw_sp->base_mac,
1942                                        sizeof(mlxsw_sp->base_mac));
1943         if (err) {
1944                 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize core CPU port\n");
1945                 goto err_core_cpu_port_init;
1946         }
1947
1948         mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] = mlxsw_sp_port;
1949         return 0;
1950
1951 err_core_cpu_port_init:
1952         kfree(mlxsw_sp_port);
1953         return err;
1954 }
1955
1956 static void mlxsw_sp_cpu_port_remove(struct mlxsw_sp *mlxsw_sp)
1957 {
1958         struct mlxsw_sp_port *mlxsw_sp_port =
1959                                 mlxsw_sp->ports[MLXSW_PORT_CPU_PORT];
1960
1961         mlxsw_core_cpu_port_fini(mlxsw_sp->core);
1962         mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] = NULL;
1963         kfree(mlxsw_sp_port);
1964 }
1965
1966 static bool mlxsw_sp_local_port_valid(u16 local_port)
1967 {
1968         return local_port != MLXSW_PORT_CPU_PORT;
1969 }
1970
1971 static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u16 local_port)
1972 {
1973         if (!mlxsw_sp_local_port_valid(local_port))
1974                 return false;
1975         return mlxsw_sp->ports[local_port] != NULL;
1976 }
1977
1978 static int mlxsw_sp_port_mapping_event_set(struct mlxsw_sp *mlxsw_sp,
1979                                            u16 local_port, bool enable)
1980 {
1981         char pmecr_pl[MLXSW_REG_PMECR_LEN];
1982
1983         mlxsw_reg_pmecr_pack(pmecr_pl, local_port,
1984                              enable ? MLXSW_REG_PMECR_E_GENERATE_EVENT :
1985                                       MLXSW_REG_PMECR_E_DO_NOT_GENERATE_EVENT);
1986         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmecr), pmecr_pl);
1987 }
1988
/* One queued port-mapping change event: the PMLP payload is copied
 * verbatim in the trap handler and parsed later from the events work item.
 */
struct mlxsw_sp_port_mapping_event {
	struct list_head list;
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
};
1993
/* Process queued PMLP port-mapping events in process context.
 *
 * Events are appended to events->queue by
 * mlxsw_sp_port_mapping_listener_func(); they are spliced to a local list
 * under the queue lock and then handled without it.
 */
static void mlxsw_sp_port_mapping_events_work(struct work_struct *work)
{
	struct mlxsw_sp_port_mapping_event *event, *next_event;
	struct mlxsw_sp_port_mapping_events *events;
	struct mlxsw_sp_port_mapping port_mapping;
	struct mlxsw_sp *mlxsw_sp;
	struct devlink *devlink;
	LIST_HEAD(event_queue);
	u16 local_port;
	int err;

	events = container_of(work, struct mlxsw_sp_port_mapping_events, work);
	mlxsw_sp = container_of(events, struct mlxsw_sp, port_mapping_events);
	devlink = priv_to_devlink(mlxsw_sp->core);

	/* Grab the whole pending queue in one short critical section. */
	spin_lock_bh(&events->queue_lock);
	list_splice_init(&events->queue, &event_queue);
	spin_unlock_bh(&events->queue_lock);

	list_for_each_entry_safe(event, next_event, &event_queue, list) {
		local_port = mlxsw_reg_pmlp_local_port_get(event->pmlp_pl);
		err = mlxsw_sp_port_module_info_parse(mlxsw_sp, local_port,
						      event->pmlp_pl, &port_mapping);
		if (err)
			goto out;

		/* An event for a port without any lanes is unexpected. */
		if (WARN_ON_ONCE(!port_mapping.width))
			goto out;

		/* Port creation is performed under the devlink instance
		 * lock, like the devlink port split/unsplit callbacks.
		 */
		devl_lock(devlink);

		if (!mlxsw_sp_port_created(mlxsw_sp, local_port))
			mlxsw_sp_port_create(mlxsw_sp, local_port,
					     false, &port_mapping);
		else
			WARN_ON_ONCE(1);

		devl_unlock(devlink);

		/* Cache the new mapping for later port (re-)creation. */
		mlxsw_sp->port_mapping[local_port] = port_mapping;

out:
		kfree(event);
	}
}
2039
2040 static void
2041 mlxsw_sp_port_mapping_listener_func(const struct mlxsw_reg_info *reg,
2042                                     char *pmlp_pl, void *priv)
2043 {
2044         struct mlxsw_sp_port_mapping_events *events;
2045         struct mlxsw_sp_port_mapping_event *event;
2046         struct mlxsw_sp *mlxsw_sp = priv;
2047         u16 local_port;
2048
2049         local_port = mlxsw_reg_pmlp_local_port_get(pmlp_pl);
2050         if (WARN_ON_ONCE(!mlxsw_sp_local_port_is_valid(mlxsw_sp, local_port)))
2051                 return;
2052
2053         events = &mlxsw_sp->port_mapping_events;
2054         event = kmalloc(sizeof(*event), GFP_ATOMIC);
2055         if (!event)
2056                 return;
2057         memcpy(event->pmlp_pl, pmlp_pl, sizeof(event->pmlp_pl));
2058         spin_lock(&events->queue_lock);
2059         list_add_tail(&event->list, &events->queue);
2060         spin_unlock(&events->queue_lock);
2061         mlxsw_core_schedule_work(&events->work);
2062 }
2063
2064 static void
2065 __mlxsw_sp_port_mapping_events_cancel(struct mlxsw_sp *mlxsw_sp)
2066 {
2067         struct mlxsw_sp_port_mapping_event *event, *next_event;
2068         struct mlxsw_sp_port_mapping_events *events;
2069
2070         events = &mlxsw_sp->port_mapping_events;
2071
2072         /* Caller needs to make sure that no new event is going to appear. */
2073         cancel_work_sync(&events->work);
2074         list_for_each_entry_safe(event, next_event, &events->queue, list) {
2075                 list_del(&event->list);
2076                 kfree(event);
2077         }
2078 }
2079
2080 static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
2081 {
2082         unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
2083         int i;
2084
2085         for (i = 1; i < max_ports; i++)
2086                 mlxsw_sp_port_mapping_event_set(mlxsw_sp, i, false);
2087         /* Make sure all scheduled events are processed */
2088         __mlxsw_sp_port_mapping_events_cancel(mlxsw_sp);
2089
2090         for (i = 1; i < max_ports; i++)
2091                 if (mlxsw_sp_port_created(mlxsw_sp, i))
2092                         mlxsw_sp_port_remove(mlxsw_sp, i);
2093         mlxsw_sp_cpu_port_remove(mlxsw_sp);
2094         kfree(mlxsw_sp->ports);
2095         mlxsw_sp->ports = NULL;
2096 }
2097
2098 static void
2099 mlxsw_sp_ports_remove_selected(struct mlxsw_core *mlxsw_core,
2100                                bool (*selector)(void *priv, u16 local_port),
2101                                void *priv)
2102 {
2103         struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
2104         unsigned int max_ports = mlxsw_core_max_ports(mlxsw_core);
2105         int i;
2106
2107         for (i = 1; i < max_ports; i++)
2108                 if (mlxsw_sp_port_created(mlxsw_sp, i) && selector(priv, i))
2109                         mlxsw_sp_port_remove(mlxsw_sp, i);
2110 }
2111
/* Allocate the ports array, enable mapping-change events, and create the
 * CPU port plus every front-panel port that currently has a module mapped
 * (non-zero width in the cached mapping).
 *
 * Error unwind note: after removing already-created ports, 'i' is reset
 * to max_ports so that the loop under err_event_enable disables events
 * for all ports, not only those up to the failure point.
 */
static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
{
	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
	struct mlxsw_sp_port_mapping_events *events;
	struct mlxsw_sp_port_mapping *port_mapping;
	size_t alloc_size;
	int i;
	int err;

	alloc_size = sizeof(struct mlxsw_sp_port *) * max_ports;
	mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
	if (!mlxsw_sp->ports)
		return -ENOMEM;

	events = &mlxsw_sp->port_mapping_events;
	INIT_LIST_HEAD(&events->queue);
	spin_lock_init(&events->queue_lock);
	INIT_WORK(&events->work, mlxsw_sp_port_mapping_events_work);

	for (i = 1; i < max_ports; i++) {
		err = mlxsw_sp_port_mapping_event_set(mlxsw_sp, i, true);
		if (err)
			goto err_event_enable;
	}

	err = mlxsw_sp_cpu_port_create(mlxsw_sp);
	if (err)
		goto err_cpu_port_create;

	for (i = 1; i < max_ports; i++) {
		port_mapping = &mlxsw_sp->port_mapping[i];
		/* Skip ports without a mapped module. */
		if (!port_mapping->width)
			continue;
		err = mlxsw_sp_port_create(mlxsw_sp, i, false, port_mapping);
		if (err)
			goto err_port_create;
	}
	return 0;

err_port_create:
	for (i--; i >= 1; i--)
		if (mlxsw_sp_port_created(mlxsw_sp, i))
			mlxsw_sp_port_remove(mlxsw_sp, i);
	i = max_ports;
	mlxsw_sp_cpu_port_remove(mlxsw_sp);
err_cpu_port_create:
err_event_enable:
	for (i--; i >= 1; i--)
		mlxsw_sp_port_mapping_event_set(mlxsw_sp, i, false);
	/* Make sure all scheduled events are processed */
	__mlxsw_sp_port_mapping_events_cancel(mlxsw_sp);
	kfree(mlxsw_sp->ports);
	mlxsw_sp->ports = NULL;
	return err;
}
2167
2168 static int mlxsw_sp_port_module_info_init(struct mlxsw_sp *mlxsw_sp)
2169 {
2170         unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
2171         struct mlxsw_sp_port_mapping *port_mapping;
2172         int i;
2173         int err;
2174
2175         mlxsw_sp->port_mapping = kcalloc(max_ports,
2176                                          sizeof(struct mlxsw_sp_port_mapping),
2177                                          GFP_KERNEL);
2178         if (!mlxsw_sp->port_mapping)
2179                 return -ENOMEM;
2180
2181         for (i = 1; i < max_ports; i++) {
2182                 port_mapping = &mlxsw_sp->port_mapping[i];
2183                 err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, port_mapping);
2184                 if (err)
2185                         goto err_port_module_info_get;
2186         }
2187         return 0;
2188
2189 err_port_module_info_get:
2190         kfree(mlxsw_sp->port_mapping);
2191         return err;
2192 }
2193
/* Free the per-port module mapping cache allocated by
 * mlxsw_sp_port_module_info_init().
 */
static void mlxsw_sp_port_module_info_fini(struct mlxsw_sp *mlxsw_sp)
{
	kfree(mlxsw_sp->port_mapping);
}
2198
/* Create 'count' split ports out of 'port_mapping': each split port gets
 * an equal share (width / count) of the module lanes, at consecutive lane
 * offsets.  The local port numbers come from the queried PMTDB payload.
 * On failure, ports created so far are removed in reverse order.
 */
static int
mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp,
			   struct mlxsw_sp_port_mapping *port_mapping,
			   unsigned int count, const char *pmtdb_pl)
{
	struct mlxsw_sp_port_mapping split_port_mapping;
	int err, i;

	split_port_mapping = *port_mapping;
	split_port_mapping.width /= count;
	for (i = 0; i < count; i++) {
		u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);

		if (!mlxsw_sp_local_port_valid(s_local_port))
			continue;

		err = mlxsw_sp_port_create(mlxsw_sp, s_local_port,
					   true, &split_port_mapping);
		if (err)
			goto err_port_create;
		/* Advance to the first lane of the next split port. */
		split_port_mapping.lane += split_port_mapping.width;
	}

	return 0;

err_port_create:
	for (i--; i >= 0; i--) {
		u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);

		if (mlxsw_sp_port_created(mlxsw_sp, s_local_port))
			mlxsw_sp_port_remove(mlxsw_sp, s_local_port);
	}
	return err;
}
2233
2234 static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
2235                                          unsigned int count,
2236                                          const char *pmtdb_pl)
2237 {
2238         struct mlxsw_sp_port_mapping *port_mapping;
2239         int i;
2240
2241         /* Go over original unsplit ports in the gap and recreate them. */
2242         for (i = 0; i < count; i++) {
2243                 u16 local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);
2244
2245                 port_mapping = &mlxsw_sp->port_mapping[local_port];
2246                 if (!port_mapping->width || !mlxsw_sp_local_port_valid(local_port))
2247                         continue;
2248                 mlxsw_sp_port_create(mlxsw_sp, local_port,
2249                                      false, port_mapping);
2250         }
2251 }
2252
2253 static struct mlxsw_sp_port *
2254 mlxsw_sp_port_get_by_local_port(struct mlxsw_sp *mlxsw_sp, u16 local_port)
2255 {
2256         if (mlxsw_sp->ports && mlxsw_sp->ports[local_port])
2257                 return mlxsw_sp->ports[local_port];
2258         return NULL;
2259 }
2260
/* devlink port-split operation.
 *
 * Validates the request against PMTDB, removes every port occupying the
 * module slice being split, then creates 'count' split ports in their
 * place.  On failure the original unsplit ports are recreated from the
 * cached mapping.
 */
static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u16 local_port,
			       unsigned int count,
			       struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port_mapping port_mapping;
	struct mlxsw_sp_port *mlxsw_sp_port;
	enum mlxsw_reg_pmtdb_status status;
	char pmtdb_pl[MLXSW_REG_PMTDB_LEN];
	int i;
	int err;

	mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port);
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		NL_SET_ERR_MSG_MOD(extack, "Port number does not exist");
		return -EINVAL;
	}

	/* A split port cannot be split again. */
	if (mlxsw_sp_port->split) {
		NL_SET_ERR_MSG_MOD(extack, "Port is already split");
		return -EINVAL;
	}

	/* Ask the device which ports are affected and whether the requested
	 * split configuration is supported.
	 */
	mlxsw_reg_pmtdb_pack(pmtdb_pl, mlxsw_sp_port->mapping.slot_index,
			     mlxsw_sp_port->mapping.module,
			     mlxsw_sp_port->mapping.module_width / count,
			     count);
	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtdb), pmtdb_pl);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to query split info");
		return err;
	}

	status = mlxsw_reg_pmtdb_status_get(pmtdb_pl);
	if (status != MLXSW_REG_PMTDB_STATUS_SUCCESS) {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported split configuration");
		return -EINVAL;
	}

	/* Copy the mapping before the port (and its struct) is removed. */
	port_mapping = mlxsw_sp_port->mapping;

	for (i = 0; i < count; i++) {
		u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);

		if (mlxsw_sp_port_created(mlxsw_sp, s_local_port))
			mlxsw_sp_port_remove(mlxsw_sp, s_local_port);
	}

	err = mlxsw_sp_port_split_create(mlxsw_sp, &port_mapping,
					 count, pmtdb_pl);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
		goto err_port_split_create;
	}

	return 0;

err_port_split_create:
	mlxsw_sp_port_unsplit_create(mlxsw_sp, count, pmtdb_pl);

	return err;
}
2325
/* devlink port-unsplit operation.
 *
 * Removes all split ports belonging to the module slice and recreates the
 * original unsplit ports from the cached mapping.
 */
static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u16 local_port,
				 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port *mlxsw_sp_port;
	char pmtdb_pl[MLXSW_REG_PMTDB_LEN];
	unsigned int count;
	int i;
	int err;

	mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port);
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		NL_SET_ERR_MSG_MOD(extack, "Port number does not exist");
		return -EINVAL;
	}

	if (!mlxsw_sp_port->split) {
		NL_SET_ERR_MSG_MOD(extack, "Port was not split");
		return -EINVAL;
	}

	/* Number of split ports the module slice was divided into. */
	count = mlxsw_sp_port->mapping.module_width /
		mlxsw_sp_port->mapping.width;

	mlxsw_reg_pmtdb_pack(pmtdb_pl, mlxsw_sp_port->mapping.slot_index,
			     mlxsw_sp_port->mapping.module,
			     mlxsw_sp_port->mapping.module_width / count,
			     count);
	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtdb), pmtdb_pl);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to query split info");
		return err;
	}

	for (i = 0; i < count; i++) {
		u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);

		if (mlxsw_sp_port_created(mlxsw_sp, s_local_port))
			mlxsw_sp_port_remove(mlxsw_sp, s_local_port);
	}

	mlxsw_sp_port_unsplit_create(mlxsw_sp, count, pmtdb_pl);

	return 0;
}
2373
2374 static void
2375 mlxsw_sp_port_down_wipe_counters(struct mlxsw_sp_port *mlxsw_sp_port)
2376 {
2377         int i;
2378
2379         for (i = 0; i < TC_MAX_QUEUE; i++)
2380                 mlxsw_sp_port->periodic_hw_stats.xstats.backlog[i] = 0;
2381 }
2382
/* PUDE (port up/down event) handler: propagate the operational status
 * reported in @pude_pl to the corresponding netdev's carrier state.
 */
static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
				     char *pude_pl, void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port;
	enum mlxsw_reg_pude_oper_status status;
	u16 local_port;

	local_port = mlxsw_reg_pude_local_port_get(pude_pl);

	/* Firmware should never report an out-of-range local port. */
	if (WARN_ON_ONCE(!mlxsw_sp_local_port_is_valid(mlxsw_sp, local_port)))
		return;
	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port)
		return;

	status = mlxsw_reg_pude_oper_status_get(pude_pl);
	if (status == MLXSW_PORT_OPER_STATUS_UP) {
		netdev_info(mlxsw_sp_port->dev, "link up\n");
		netif_carrier_on(mlxsw_sp_port->dev);
		/* Kick the PTP shaper work immediately on link up. */
		mlxsw_core_schedule_dw(&mlxsw_sp_port->ptp.shaper_dw, 0);
	} else {
		netdev_info(mlxsw_sp_port->dev, "link down\n");
		netif_carrier_off(mlxsw_sp_port->dev);
		/* Queue backlog counters are meaningless while down. */
		mlxsw_sp_port_down_wipe_counters(mlxsw_sp_port);
	}
}
2410
/* Deliver all timestamp records carried by an MTPPTR (PTP timestamp
 * FIFO) event to the Spectrum-1 PTP code. @ingress distinguishes the
 * ingress and egress FIFOs.
 */
static void mlxsw_sp1_ptp_fifo_event_func(struct mlxsw_sp *mlxsw_sp,
					  char *mtpptr_pl, bool ingress)
{
	u16 local_port;
	u8 num_rec;
	int i;

	local_port = mlxsw_reg_mtpptr_local_port_get(mtpptr_pl);
	num_rec = mlxsw_reg_mtpptr_num_rec_get(mtpptr_pl);
	for (i = 0; i < num_rec; i++) {
		u8 domain_number;
		u8 message_type;
		u16 sequence_id;
		u64 timestamp;

		/* Unpack one record and hand it to the PTP matching code. */
		mlxsw_reg_mtpptr_unpack(mtpptr_pl, i, &message_type,
					&domain_number, &sequence_id,
					&timestamp);
		mlxsw_sp1_ptp_got_timestamp(mlxsw_sp, ingress, local_port,
					    message_type, domain_number,
					    sequence_id, timestamp);
	}
}
2434
2435 static void mlxsw_sp1_ptp_ing_fifo_event_func(const struct mlxsw_reg_info *reg,
2436                                               char *mtpptr_pl, void *priv)
2437 {
2438         struct mlxsw_sp *mlxsw_sp = priv;
2439
2440         mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, true);
2441 }
2442
2443 static void mlxsw_sp1_ptp_egr_fifo_event_func(const struct mlxsw_reg_info *reg,
2444                                               char *mtpptr_pl, void *priv)
2445 {
2446         struct mlxsw_sp *mlxsw_sp = priv;
2447
2448         mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, false);
2449 }
2450
2451 void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb,
2452                                        u16 local_port, void *priv)
2453 {
2454         struct mlxsw_sp *mlxsw_sp = priv;
2455         struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
2456         struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
2457
2458         if (unlikely(!mlxsw_sp_port)) {
2459                 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
2460                                      local_port);
2461                 return;
2462         }
2463
2464         skb->dev = mlxsw_sp_port->dev;
2465
2466         pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
2467         u64_stats_update_begin(&pcpu_stats->syncp);
2468         pcpu_stats->rx_packets++;
2469         pcpu_stats->rx_bytes += skb->len;
2470         u64_stats_update_end(&pcpu_stats->syncp);
2471
2472         skb->protocol = eth_type_trans(skb, skb->dev);
2473         netif_receive_skb(skb);
2474 }
2475
2476 static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u16 local_port,
2477                                            void *priv)
2478 {
2479         skb->offload_fwd_mark = 1;
2480         return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
2481 }
2482
2483 static void mlxsw_sp_rx_listener_l3_mark_func(struct sk_buff *skb,
2484                                               u16 local_port, void *priv)
2485 {
2486         skb->offload_l3_fwd_mark = 1;
2487         skb->offload_fwd_mark = 1;
2488         return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
2489 }
2490
/* Hand a trapped PTP packet to the generation-specific receive handler. */
void mlxsw_sp_ptp_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
			  u16 local_port)
{
	mlxsw_sp->ptp_ops->receive(mlxsw_sp, skb, local_port);
}
2496
/* Listener-table helpers. The three RXL variants differ only in which RX
 * listener function they install (plain / offload_fwd_mark / L3 mark);
 * all prefix the trap group with SP_ and pass DISCARD as the final
 * MLXSW_RXL() argument.
 */
#define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl)  \
	MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action, \
		  _is_ctrl, SP_##_trap_group, DISCARD)

#define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl)     \
	MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action,    \
		_is_ctrl, SP_##_trap_group, DISCARD)

#define MLXSW_SP_RXL_L3_MARK(_trap_id, _action, _trap_group, _is_ctrl)  \
	MLXSW_RXL(mlxsw_sp_rx_listener_l3_mark_func, _trap_id, _action, \
		_is_ctrl, SP_##_trap_group, DISCARD)

/* Event listener registered in the SP_EVENT trap group. */
#define MLXSW_SP_EVENTL(_func, _trap_id)                \
	MLXSW_EVENTL(_func, _trap_id, SP_EVENT)
2511
/* Traps and event listeners common to all Spectrum generations. */
static const struct mlxsw_listener mlxsw_sp_listener[] = {
	/* Events */
	MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func, PUDE),
	/* L2 traps */
	MLXSW_SP_RXL_NO_MARK(FID_MISS, TRAP_TO_CPU, FID_MISS, false),
	/* L3 traps */
	MLXSW_SP_RXL_MARK(IPV6_UNSPECIFIED_ADDRESS, TRAP_TO_CPU, ROUTER_EXP,
			  false),
	MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_SRC, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV6_MC_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP,
			  false),
	MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_CLASS_E, FORWARD,
			     ROUTER_EXP, false),
	MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_MC_DMAC, FORWARD,
			     ROUTER_EXP, false),
	MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_DIP, FORWARD,
			     ROUTER_EXP, false),
	MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_DIP_LINK_LOCAL, FORWARD,
			     ROUTER_EXP, false),
	/* Multicast Router Traps */
	MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false),
	MLXSW_SP_RXL_L3_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false),
	/* NVE traps */
	MLXSW_SP_RXL_MARK(NVE_ENCAP_ARP, TRAP_TO_CPU, NEIGH_DISCOVERY, false),
};
2537
/* Listeners specific to Spectrum-1: PTP timestamp FIFO events. */
static const struct mlxsw_listener mlxsw_sp1_listener[] = {
	/* Events */
	MLXSW_EVENTL(mlxsw_sp1_ptp_egr_fifo_event_func, PTP_EGR_FIFO, SP_PTP0),
	MLXSW_EVENTL(mlxsw_sp1_ptp_ing_fifo_event_func, PTP_ING_FIFO, SP_PTP0),
};
2543
/* Listeners specific to Spectrum-2: port mapping change events (PMLPE). */
static const struct mlxsw_listener mlxsw_sp2_listener[] = {
	/* Events */
	MLXSW_SP_EVENTL(mlxsw_sp_port_mapping_listener_func, PMLPE),
};
2548
/* Configure the CPU policers (QPCR register) that rate-limit traffic
 * trapped to the CPU for the trap groups this driver configures here,
 * and mark those policer IDs as used in the policers_usage bitmap.
 *
 * Returns 0 on success, -EIO if the MAX_CPU_POLICERS resource is not
 * available, or the first register-write error.
 */
static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	char qpcr_pl[MLXSW_REG_QPCR_LEN];
	enum mlxsw_reg_qpcr_ir_units ir_units;
	int max_cpu_policers;
	bool is_bytes;
	u8 burst_size;
	u32 rate;
	int i, err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_CPU_POLICERS))
		return -EIO;

	max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);

	ir_units = MLXSW_REG_QPCR_IR_UNITS_M;
	for (i = 0; i < max_cpu_policers; i++) {
		is_bytes = false;
		switch (i) {
		/* Only these groups get a policer here; others are skipped. */
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_FID_MISS:
			rate = 1024;
			burst_size = 7;
			break;
		default:
			continue;
		}

		__set_bit(i, mlxsw_sp->trap->policers_usage);
		mlxsw_reg_qpcr_pack(qpcr_pl, i, ir_units, is_bytes, rate,
				    burst_size);
		err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl);
		if (err)
			return err;
	}

	return 0;
}
2589
/* Bind each trap group configured by this driver to its CPU policer,
 * priority and traffic class via the HTGT register. Groups not listed in
 * the switch are skipped; the EVENT group uses the default priority/TC
 * and no policer.
 *
 * Returns 0 on success, -EIO on a missing resource or a policer ID that
 * exceeds MAX_CPU_POLICERS, or the first register-write error.
 */
static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];
	enum mlxsw_reg_htgt_trap_group i;
	int max_cpu_policers;
	int max_trap_groups;
	u8 priority, tc;
	u16 policer_id;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_TRAP_GROUPS))
		return -EIO;

	max_trap_groups = MLXSW_CORE_RES_GET(mlxsw_core, MAX_TRAP_GROUPS);
	max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);

	for (i = 0; i < max_trap_groups; i++) {
		/* By default the policer ID equals the trap group ID. */
		policer_id = i;
		switch (i) {
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_FID_MISS:
			priority = 1;
			tc = 1;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT:
			priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY;
			tc = MLXSW_REG_HTGT_DEFAULT_TC;
			policer_id = MLXSW_REG_HTGT_INVALID_POLICER;
			break;
		default:
			continue;
		}

		/* A real policer ID must fit in the device's policer range. */
		if (max_cpu_policers <= policer_id &&
		    policer_id != MLXSW_REG_HTGT_INVALID_POLICER)
			return -EIO;

		mlxsw_reg_htgt_pack(htgt_pl, i, policer_id, priority, tc);
		err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
		if (err)
			return err;
	}

	return 0;
}
2636
/* Set up packet trapping to the CPU: allocate the trap tracking structure
 * (with a policers-usage bitmap sized by the MAX_CPU_POLICERS resource),
 * configure CPU policers and trap groups, then register the common
 * listeners followed by the per-ASIC extra listeners.
 *
 * Returns 0 on success or a negative errno; partially-initialized state
 * is unwound via the goto chain on failure.
 */
static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_trap *trap;
	u64 max_policers;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_CPU_POLICERS))
		return -EIO;
	max_policers = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_CPU_POLICERS);
	/* policers_usage is a flexible array of longs, one bit per policer. */
	trap = kzalloc(struct_size(trap, policers_usage,
				   BITS_TO_LONGS(max_policers)), GFP_KERNEL);
	if (!trap)
		return -ENOMEM;
	trap->max_policers = max_policers;
	mlxsw_sp->trap = trap;

	err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core);
	if (err)
		goto err_cpu_policers_set;

	err = mlxsw_sp_trap_groups_set(mlxsw_sp->core);
	if (err)
		goto err_trap_groups_set;

	err = mlxsw_core_traps_register(mlxsw_sp->core, mlxsw_sp_listener,
					ARRAY_SIZE(mlxsw_sp_listener),
					mlxsw_sp);
	if (err)
		goto err_traps_register;

	/* Generation-specific listeners (e.g. SP1 PTP FIFO events). */
	err = mlxsw_core_traps_register(mlxsw_sp->core, mlxsw_sp->listeners,
					mlxsw_sp->listeners_count, mlxsw_sp);
	if (err)
		goto err_extra_traps_init;

	return 0;

err_extra_traps_init:
	mlxsw_core_traps_unregister(mlxsw_sp->core, mlxsw_sp_listener,
				    ARRAY_SIZE(mlxsw_sp_listener),
				    mlxsw_sp);
err_traps_register:
err_trap_groups_set:
err_cpu_policers_set:
	kfree(trap);
	return err;
}
2684
/* Teardown counterpart of mlxsw_sp_traps_init(): unregister listeners in
 * reverse registration order (per-ASIC first, then common) and free the
 * trap tracking structure.
 */
static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_core_traps_unregister(mlxsw_sp->core, mlxsw_sp->listeners,
				    mlxsw_sp->listeners_count,
				    mlxsw_sp);
	mlxsw_core_traps_unregister(mlxsw_sp->core, mlxsw_sp_listener,
				    ARRAY_SIZE(mlxsw_sp_listener), mlxsw_sp);
	kfree(mlxsw_sp->trap);
}
2694
2695 #define MLXSW_SP_LAG_SEED_INIT 0xcafecafe
2696
/* Configure LAG hashing (SLCR register) with a seed derived from the
 * switch base MAC and allocate the per-LAG upper-device tracking array.
 *
 * Returns 0 on success or a negative errno.
 */
static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
{
	char slcr_pl[MLXSW_REG_SLCR_LEN];
	u16 max_lag;
	u32 seed;
	int err;

	seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac),
		     MLXSW_SP_LAG_SEED_INIT);
	/* Hash over the full L2/L3/L4 header fields. */
	mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
				     MLXSW_REG_SLCR_LAG_HASH_DMAC |
				     MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
				     MLXSW_REG_SLCR_LAG_HASH_VLANID |
				     MLXSW_REG_SLCR_LAG_HASH_SIP |
				     MLXSW_REG_SLCR_LAG_HASH_DIP |
				     MLXSW_REG_SLCR_LAG_HASH_SPORT |
				     MLXSW_REG_SLCR_LAG_HASH_DPORT |
				     MLXSW_REG_SLCR_LAG_HASH_IPPROTO, seed);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
	if (err)
		return err;

	err = mlxsw_core_max_lag(mlxsw_sp->core, &max_lag);
	if (err)
		return err;

	/* MAX_LAG_MEMBERS is not used in this function; presumably it is
	 * validated up front for later LAG users — TODO confirm.
	 */
	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS))
		return -EIO;

	mlxsw_sp->lags = kcalloc(max_lag, sizeof(struct mlxsw_sp_upper),
				 GFP_KERNEL);
	if (!mlxsw_sp->lags)
		return -ENOMEM;

	return 0;
}
2733
/* Free the LAG tracking array allocated by mlxsw_sp_lag_init(). */
static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp)
{
	kfree(mlxsw_sp->lags);
}
2738
/* PTP operations for Spectrum-1, all backed by the sp1-specific helpers. */
static const struct mlxsw_sp_ptp_ops mlxsw_sp1_ptp_ops = {
	.clock_init	= mlxsw_sp1_ptp_clock_init,
	.clock_fini	= mlxsw_sp1_ptp_clock_fini,
	.init		= mlxsw_sp1_ptp_init,
	.fini		= mlxsw_sp1_ptp_fini,
	.receive	= mlxsw_sp1_ptp_receive,
	.transmitted	= mlxsw_sp1_ptp_transmitted,
	.hwtstamp_get	= mlxsw_sp1_ptp_hwtstamp_get,
	.hwtstamp_set	= mlxsw_sp1_ptp_hwtstamp_set,
	.shaper_work	= mlxsw_sp1_ptp_shaper_work,
	.get_ts_info	= mlxsw_sp1_ptp_get_ts_info,
	.get_stats_count = mlxsw_sp1_get_stats_count,
	.get_stats_strings = mlxsw_sp1_get_stats_strings,
	.get_stats	= mlxsw_sp1_get_stats,
	.txhdr_construct = mlxsw_sp_ptp_txhdr_construct,
2755
/* PTP operations for Spectrum-2/3, backed by the sp2-specific helpers. */
static const struct mlxsw_sp_ptp_ops mlxsw_sp2_ptp_ops = {
	.clock_init	= mlxsw_sp2_ptp_clock_init,
	.clock_fini	= mlxsw_sp2_ptp_clock_fini,
	.init		= mlxsw_sp2_ptp_init,
	.fini		= mlxsw_sp2_ptp_fini,
	.receive	= mlxsw_sp2_ptp_receive,
	.transmitted	= mlxsw_sp2_ptp_transmitted,
	.hwtstamp_get	= mlxsw_sp2_ptp_hwtstamp_get,
	.hwtstamp_set	= mlxsw_sp2_ptp_hwtstamp_set,
	.shaper_work	= mlxsw_sp2_ptp_shaper_work,
	.get_ts_info	= mlxsw_sp2_ptp_get_ts_info,
	.get_stats_count = mlxsw_sp2_get_stats_count,
	.get_stats_strings = mlxsw_sp2_get_stats_strings,
	.get_stats	= mlxsw_sp2_get_stats,
	.txhdr_construct = mlxsw_sp2_ptp_txhdr_construct,
2772
/* PTP operations for Spectrum-4: identical to mlxsw_sp2_ptp_ops except
 * txhdr_construct, which uses the common mlxsw_sp_ptp_txhdr_construct().
 */
static const struct mlxsw_sp_ptp_ops mlxsw_sp4_ptp_ops = {
	.clock_init	= mlxsw_sp2_ptp_clock_init,
	.clock_fini	= mlxsw_sp2_ptp_clock_fini,
	.init		= mlxsw_sp2_ptp_init,
	.fini		= mlxsw_sp2_ptp_fini,
	.receive	= mlxsw_sp2_ptp_receive,
	.transmitted	= mlxsw_sp2_ptp_transmitted,
	.hwtstamp_get	= mlxsw_sp2_ptp_hwtstamp_get,
	.hwtstamp_set	= mlxsw_sp2_ptp_hwtstamp_set,
	.shaper_work	= mlxsw_sp2_ptp_shaper_work,
	.get_ts_info	= mlxsw_sp2_ptp_get_ts_info,
	.get_stats_count = mlxsw_sp2_get_stats_count,
	.get_stats_strings = mlxsw_sp2_get_stats_strings,
	.get_stats	= mlxsw_sp2_get_stats,
	.txhdr_construct = mlxsw_sp_ptp_txhdr_construct,
2789
/* Node of sample_trigger_ht, mapping a sampling trigger (the hash key)
 * to its sampling parameters. Freed via kfree_rcu() since readers look
 * it up under RCU.
 */
struct mlxsw_sp_sample_trigger_node {
	struct mlxsw_sp_sample_trigger trigger;	/* hash key */
	struct mlxsw_sp_sample_params params;
	struct rhash_head ht_node;
	struct rcu_head rcu;		/* for deferred kfree_rcu() */
	refcount_t refcount;		/* number of params_set() users */
};
2797
/* sample_trigger_ht is keyed by the embedded trigger struct and compared
 * as raw bytes (key_len == sizeof), so keys must be built with
 * mlxsw_sp_sample_trigger_key_init() to keep padding zeroed.
 */
static const struct rhashtable_params mlxsw_sp_sample_trigger_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_sample_trigger_node, trigger),
	.head_offset = offsetof(struct mlxsw_sp_sample_trigger_node, ht_node),
	.key_len = sizeof(struct mlxsw_sp_sample_trigger),
	.automatic_shrinking = true,
};
2804
/* Build a canonical hash-table key from @trigger. The memset() is
 * required (rather than member-wise initialization) because the key is
 * hashed and compared as raw bytes by the rhashtable, so padding bytes
 * must be zeroed as well.
 */
static void
mlxsw_sp_sample_trigger_key_init(struct mlxsw_sp_sample_trigger *key,
				 const struct mlxsw_sp_sample_trigger *trigger)
{
	memset(key, 0, sizeof(*key));
	key->type = trigger->type;
	key->local_port = trigger->local_port;
}
2813
2814 /* RCU read lock must be held */
2815 struct mlxsw_sp_sample_params *
2816 mlxsw_sp_sample_trigger_params_lookup(struct mlxsw_sp *mlxsw_sp,
2817                                       const struct mlxsw_sp_sample_trigger *trigger)
2818 {
2819         struct mlxsw_sp_sample_trigger_node *trigger_node;
2820         struct mlxsw_sp_sample_trigger key;
2821
2822         mlxsw_sp_sample_trigger_key_init(&key, trigger);
2823         trigger_node = rhashtable_lookup(&mlxsw_sp->sample_trigger_ht, &key,
2824                                          mlxsw_sp_sample_trigger_ht_params);
2825         if (!trigger_node)
2826                 return NULL;
2827
2828         return &trigger_node->params;
2829 }
2830
2831 static int
2832 mlxsw_sp_sample_trigger_node_init(struct mlxsw_sp *mlxsw_sp,
2833                                   const struct mlxsw_sp_sample_trigger *trigger,
2834                                   const struct mlxsw_sp_sample_params *params)
2835 {
2836         struct mlxsw_sp_sample_trigger_node *trigger_node;
2837         int err;
2838
2839         trigger_node = kzalloc(sizeof(*trigger_node), GFP_KERNEL);
2840         if (!trigger_node)
2841                 return -ENOMEM;
2842
2843         trigger_node->trigger = *trigger;
2844         trigger_node->params = *params;
2845         refcount_set(&trigger_node->refcount, 1);
2846
2847         err = rhashtable_insert_fast(&mlxsw_sp->sample_trigger_ht,
2848                                      &trigger_node->ht_node,
2849                                      mlxsw_sp_sample_trigger_ht_params);
2850         if (err)
2851                 goto err_rhashtable_insert;
2852
2853         return 0;
2854
2855 err_rhashtable_insert:
2856         kfree(trigger_node);
2857         return err;
2858 }
2859
2860 static void
2861 mlxsw_sp_sample_trigger_node_fini(struct mlxsw_sp *mlxsw_sp,
2862                                   struct mlxsw_sp_sample_trigger_node *trigger_node)
2863 {
2864         rhashtable_remove_fast(&mlxsw_sp->sample_trigger_ht,
2865                                &trigger_node->ht_node,
2866                                mlxsw_sp_sample_trigger_ht_params);
2867         kfree_rcu(trigger_node, rcu);
2868 }
2869
/* Associate sampling parameters with a trigger, sharing identical
 * requests via reference counting.
 *
 * If no node exists for the (normalized) trigger key, a new one is
 * created. Otherwise the request is shared only when the existing
 * trigger is not bound to a specific port and the parameters match
 * exactly, in which case the node's refcount is incremented.
 *
 * Must be called under RTNL, which serializes writers.
 *
 * Returns 0 on success or a negative errno (with @extack set).
 */
int
mlxsw_sp_sample_trigger_params_set(struct mlxsw_sp *mlxsw_sp,
				   const struct mlxsw_sp_sample_trigger *trigger,
				   const struct mlxsw_sp_sample_params *params,
				   struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_sample_trigger_node *trigger_node;
	struct mlxsw_sp_sample_trigger key;

	ASSERT_RTNL();

	mlxsw_sp_sample_trigger_key_init(&key, trigger);

	trigger_node = rhashtable_lookup_fast(&mlxsw_sp->sample_trigger_ht,
					      &key,
					      mlxsw_sp_sample_trigger_ht_params);
	if (!trigger_node)
		return mlxsw_sp_sample_trigger_node_init(mlxsw_sp, &key,
							 params);

	/* A per-port trigger cannot be shared. */
	if (trigger_node->trigger.local_port) {
		NL_SET_ERR_MSG_MOD(extack, "Sampling already enabled on port");
		return -EINVAL;
	}

	if (trigger_node->params.psample_group != params->psample_group ||
	    trigger_node->params.truncate != params->truncate ||
	    trigger_node->params.rate != params->rate ||
	    trigger_node->params.trunc_size != params->trunc_size) {
		NL_SET_ERR_MSG_MOD(extack, "Sampling parameters do not match for an existing sampling trigger");
		return -EINVAL;
	}

	refcount_inc(&trigger_node->refcount);

	return 0;
}
2907
2908 void
2909 mlxsw_sp_sample_trigger_params_unset(struct mlxsw_sp *mlxsw_sp,
2910                                      const struct mlxsw_sp_sample_trigger *trigger)
2911 {
2912         struct mlxsw_sp_sample_trigger_node *trigger_node;
2913         struct mlxsw_sp_sample_trigger key;
2914
2915         ASSERT_RTNL();
2916
2917         mlxsw_sp_sample_trigger_key_init(&key, trigger);
2918
2919         trigger_node = rhashtable_lookup_fast(&mlxsw_sp->sample_trigger_ht,
2920                                               &key,
2921                                               mlxsw_sp_sample_trigger_ht_params);
2922         if (!trigger_node)
2923                 return;
2924
2925         if (!refcount_dec_and_test(&trigger_node->refcount))
2926                 return;
2927
2928         mlxsw_sp_sample_trigger_node_fini(mlxsw_sp, trigger_node);
2929 }
2930
2931 static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
2932                                     unsigned long event, void *ptr);
2933
/* Default and increased packet parsing depths programmed into the device,
 * and the default VXLAN UDP destination port (4789, the IANA-assigned
 * value).
 */
#define MLXSW_SP_DEFAULT_PARSING_DEPTH 96
#define MLXSW_SP_INCREASED_PARSING_DEPTH 128
#define MLXSW_SP_DEFAULT_VXLAN_UDP_DPORT 4789
2937
2938 static void mlxsw_sp_parsing_init(struct mlxsw_sp *mlxsw_sp)
2939 {
2940         mlxsw_sp->parsing.parsing_depth = MLXSW_SP_DEFAULT_PARSING_DEPTH;
2941         mlxsw_sp->parsing.vxlan_udp_dport = MLXSW_SP_DEFAULT_VXLAN_UDP_DPORT;
2942         mutex_init(&mlxsw_sp->parsing.lock);
2943 }
2944
/* Teardown counterpart of mlxsw_sp_parsing_init(). */
static void mlxsw_sp_parsing_fini(struct mlxsw_sp *mlxsw_sp)
{
	mutex_destroy(&mlxsw_sp->parsing.lock);
}
2949
/* Node of ipv6_addr_ht, mapping an IPv6 address (the hash key) to the
 * KVDL entry holding it in the device, shared via reference counting.
 */
struct mlxsw_sp_ipv6_addr_node {
	struct in6_addr key;	/* hash key: the IPv6 address */
	struct rhash_head ht_node;
	u32 kvdl_index;		/* KVDL entry holding the address */
	refcount_t refcount;	/* users of this KVDL entry */
};
2956
/* ipv6_addr_ht is keyed directly by the in6_addr embedded in the node. */
static const struct rhashtable_params mlxsw_sp_ipv6_addr_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_ipv6_addr_node, key),
	.head_offset = offsetof(struct mlxsw_sp_ipv6_addr_node, ht_node),
	.key_len = sizeof(struct in6_addr),
	.automatic_shrinking = true,
};
2963
/* Allocate a KVDL entry for @addr6, program the address into it via the
 * RIPS register and track it in ipv6_addr_ht. On success *p_kvdl_index
 * holds the new entry's index.
 *
 * Caller must hold ipv6_addr_ht_lock.
 *
 * Returns 0 on success or a negative errno; on any failure the KVDL
 * entry is released again (shared err_rips_write/err_node_alloc path).
 */
static int
mlxsw_sp_ipv6_addr_init(struct mlxsw_sp *mlxsw_sp, const struct in6_addr *addr6,
			u32 *p_kvdl_index)
{
	struct mlxsw_sp_ipv6_addr_node *node;
	char rips_pl[MLXSW_REG_RIPS_LEN];
	int err;

	err = mlxsw_sp_kvdl_alloc(mlxsw_sp,
				  MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1,
				  p_kvdl_index);
	if (err)
		return err;

	mlxsw_reg_rips_pack(rips_pl, *p_kvdl_index, addr6);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rips), rips_pl);
	if (err)
		goto err_rips_write;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node) {
		err = -ENOMEM;
		goto err_node_alloc;
	}

	node->key = *addr6;
	node->kvdl_index = *p_kvdl_index;
	refcount_set(&node->refcount, 1);

	err = rhashtable_insert_fast(&mlxsw_sp->ipv6_addr_ht,
				     &node->ht_node,
				     mlxsw_sp_ipv6_addr_ht_params);
	if (err)
		goto err_rhashtable_insert;

	return 0;

err_rhashtable_insert:
	kfree(node);
err_node_alloc:
err_rips_write:
	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1,
			   *p_kvdl_index);
	return err;
}
3009
/* Release @node: unlink it from ipv6_addr_ht, free it and return its
 * KVDL entry. The index is saved up front because the node is freed
 * before the KVDL entry is released.
 *
 * Caller must hold ipv6_addr_ht_lock.
 */
static void mlxsw_sp_ipv6_addr_fini(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_ipv6_addr_node *node)
{
	u32 kvdl_index = node->kvdl_index;

	rhashtable_remove_fast(&mlxsw_sp->ipv6_addr_ht, &node->ht_node,
			       mlxsw_sp_ipv6_addr_ht_params);
	kfree(node);
	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1,
			   kvdl_index);
}
3021
3022 int mlxsw_sp_ipv6_addr_kvdl_index_get(struct mlxsw_sp *mlxsw_sp,
3023                                       const struct in6_addr *addr6,
3024                                       u32 *p_kvdl_index)
3025 {
3026         struct mlxsw_sp_ipv6_addr_node *node;
3027         int err = 0;
3028
3029         mutex_lock(&mlxsw_sp->ipv6_addr_ht_lock);
3030         node = rhashtable_lookup_fast(&mlxsw_sp->ipv6_addr_ht, addr6,
3031                                       mlxsw_sp_ipv6_addr_ht_params);
3032         if (node) {
3033                 refcount_inc(&node->refcount);
3034                 *p_kvdl_index = node->kvdl_index;
3035                 goto out_unlock;
3036         }
3037
3038         err = mlxsw_sp_ipv6_addr_init(mlxsw_sp, addr6, p_kvdl_index);
3039
3040 out_unlock:
3041         mutex_unlock(&mlxsw_sp->ipv6_addr_ht_lock);
3042         return err;
3043 }
3044
3045 void
3046 mlxsw_sp_ipv6_addr_put(struct mlxsw_sp *mlxsw_sp, const struct in6_addr *addr6)
3047 {
3048         struct mlxsw_sp_ipv6_addr_node *node;
3049
3050         mutex_lock(&mlxsw_sp->ipv6_addr_ht_lock);
3051         node = rhashtable_lookup_fast(&mlxsw_sp->ipv6_addr_ht, addr6,
3052                                       mlxsw_sp_ipv6_addr_ht_params);
3053         if (WARN_ON(!node))
3054                 goto out_unlock;
3055
3056         if (!refcount_dec_and_test(&node->refcount))
3057                 goto out_unlock;
3058
3059         mlxsw_sp_ipv6_addr_fini(mlxsw_sp, node);
3060
3061 out_unlock:
3062         mutex_unlock(&mlxsw_sp->ipv6_addr_ht_lock);
3063 }
3064
3065 static int mlxsw_sp_ipv6_addr_ht_init(struct mlxsw_sp *mlxsw_sp)
3066 {
3067         int err;
3068
3069         err = rhashtable_init(&mlxsw_sp->ipv6_addr_ht,
3070                               &mlxsw_sp_ipv6_addr_ht_params);
3071         if (err)
3072                 return err;
3073
3074         mutex_init(&mlxsw_sp->ipv6_addr_ht_lock);
3075         return 0;
3076 }
3077
/* Teardown counterpart of mlxsw_sp_ipv6_addr_ht_init(). */
static void mlxsw_sp_ipv6_addr_ht_fini(struct mlxsw_sp *mlxsw_sp)
{
	mutex_destroy(&mlxsw_sp->ipv6_addr_ht_lock);
	rhashtable_destroy(&mlxsw_sp->ipv6_addr_ht);
}
3083
3084 static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
3085                          const struct mlxsw_bus_info *mlxsw_bus_info,
3086                          struct netlink_ext_ack *extack)
3087 {
3088         struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
3089         int err;
3090
3091         mlxsw_sp->core = mlxsw_core;
3092         mlxsw_sp->bus_info = mlxsw_bus_info;
3093
3094         mlxsw_sp_parsing_init(mlxsw_sp);
3095         mlxsw_core_emad_string_tlv_enable(mlxsw_core);
3096
3097         err = mlxsw_sp_base_mac_get(mlxsw_sp);
3098         if (err) {
3099                 dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
3100                 return err;
3101         }
3102
3103         err = mlxsw_sp_kvdl_init(mlxsw_sp);
3104         if (err) {
3105                 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize KVDL\n");
3106                 return err;
3107         }
3108
3109         err = mlxsw_sp_pgt_init(mlxsw_sp);
3110         if (err) {
3111                 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize PGT\n");
3112                 goto err_pgt_init;
3113         }
3114
3115         err = mlxsw_sp_fids_init(mlxsw_sp);
3116         if (err) {
3117                 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize FIDs\n");
3118                 goto err_fids_init;
3119         }
3120
3121         err = mlxsw_sp_policers_init(mlxsw_sp);
3122         if (err) {
3123                 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize policers\n");
3124                 goto err_policers_init;
3125         }
3126
3127         err = mlxsw_sp_traps_init(mlxsw_sp);
3128         if (err) {
3129                 dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n");
3130                 goto err_traps_init;
3131         }
3132
3133         err = mlxsw_sp_devlink_traps_init(mlxsw_sp);
3134         if (err) {
3135                 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize devlink traps\n");
3136                 goto err_devlink_traps_init;
3137         }
3138
3139         err = mlxsw_sp_buffers_init(mlxsw_sp);
3140         if (err) {
3141                 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
3142                 goto err_buffers_init;
3143         }
3144
3145         err = mlxsw_sp_lag_init(mlxsw_sp);
3146         if (err) {
3147                 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
3148                 goto err_lag_init;
3149         }
3150
3151         /* Initialize SPAN before router and switchdev, so that those components
3152          * can call mlxsw_sp_span_respin().
3153          */
3154         err = mlxsw_sp_span_init(mlxsw_sp);
3155         if (err) {
3156                 dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n");
3157                 goto err_span_init;
3158         }
3159
3160         err = mlxsw_sp_switchdev_init(mlxsw_sp);
3161         if (err) {
3162                 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
3163                 goto err_switchdev_init;
3164         }
3165
3166         err = mlxsw_sp_counter_pool_init(mlxsw_sp);
3167         if (err) {
3168                 dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n");
3169                 goto err_counter_pool_init;
3170         }
3171
3172         err = mlxsw_sp_afa_init(mlxsw_sp);
3173         if (err) {
3174                 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL actions\n");
3175                 goto err_afa_init;
3176         }
3177
3178         err = mlxsw_sp_ipv6_addr_ht_init(mlxsw_sp);
3179         if (err) {
3180                 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize hash table for IPv6 addresses\n");
3181                 goto err_ipv6_addr_ht_init;
3182         }
3183
3184         err = mlxsw_sp_nve_init(mlxsw_sp);
3185         if (err) {
3186                 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize NVE\n");
3187                 goto err_nve_init;
3188         }
3189
3190         err = mlxsw_sp_acl_init(mlxsw_sp);
3191         if (err) {
3192                 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n");
3193                 goto err_acl_init;
3194         }
3195
3196         err = mlxsw_sp_router_init(mlxsw_sp, extack);
3197         if (err) {
3198                 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
3199                 goto err_router_init;
3200         }
3201
3202         if (mlxsw_sp->bus_info->read_clock_capable) {
3203                 /* NULL is a valid return value from clock_init */
3204                 mlxsw_sp->clock =
3205                         mlxsw_sp->ptp_ops->clock_init(mlxsw_sp,
3206                                                       mlxsw_sp->bus_info->dev);
3207                 if (IS_ERR(mlxsw_sp->clock)) {
3208                         err = PTR_ERR(mlxsw_sp->clock);
3209                         dev_err(mlxsw_sp->bus_info->dev, "Failed to init ptp clock\n");
3210                         goto err_ptp_clock_init;
3211                 }
3212         }
3213
3214         if (mlxsw_sp->clock) {
3215                 /* NULL is a valid return value from ptp_ops->init */
3216                 mlxsw_sp->ptp_state = mlxsw_sp->ptp_ops->init(mlxsw_sp);
3217                 if (IS_ERR(mlxsw_sp->ptp_state)) {
3218                         err = PTR_ERR(mlxsw_sp->ptp_state);
3219                         dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize PTP\n");
3220                         goto err_ptp_init;
3221                 }
3222         }
3223
3224         /* Initialize netdevice notifier after SPAN is initialized, so that the
3225          * event handler can call SPAN respin.
3226          */
3227         mlxsw_sp->netdevice_nb.notifier_call = mlxsw_sp_netdevice_event;
3228         err = register_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
3229                                               &mlxsw_sp->netdevice_nb);
3230         if (err) {
3231                 dev_err(mlxsw_sp->bus_info->dev, "Failed to register netdev notifier\n");
3232                 goto err_netdev_notifier;
3233         }
3234
3235         err = mlxsw_sp_dpipe_init(mlxsw_sp);
3236         if (err) {
3237                 dev_err(mlxsw_sp->bus_info->dev, "Failed to init pipeline debug\n");
3238                 goto err_dpipe_init;
3239         }
3240
3241         err = mlxsw_sp_port_module_info_init(mlxsw_sp);
3242         if (err) {
3243                 dev_err(mlxsw_sp->bus_info->dev, "Failed to init port module info\n");
3244                 goto err_port_module_info_init;
3245         }
3246
3247         err = rhashtable_init(&mlxsw_sp->sample_trigger_ht,
3248                               &mlxsw_sp_sample_trigger_ht_params);
3249         if (err) {
3250                 dev_err(mlxsw_sp->bus_info->dev, "Failed to init sampling trigger hashtable\n");
3251                 goto err_sample_trigger_init;
3252         }
3253
3254         err = mlxsw_sp_ports_create(mlxsw_sp);
3255         if (err) {
3256                 dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
3257                 goto err_ports_create;
3258         }
3259
3260         return 0;
3261
3262 err_ports_create:
3263         rhashtable_destroy(&mlxsw_sp->sample_trigger_ht);
3264 err_sample_trigger_init:
3265         mlxsw_sp_port_module_info_fini(mlxsw_sp);
3266 err_port_module_info_init:
3267         mlxsw_sp_dpipe_fini(mlxsw_sp);
3268 err_dpipe_init:
3269         unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
3270                                           &mlxsw_sp->netdevice_nb);
3271 err_netdev_notifier:
3272         if (mlxsw_sp->clock)
3273                 mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state);
3274 err_ptp_init:
3275         if (mlxsw_sp->clock)
3276                 mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock);
3277 err_ptp_clock_init:
3278         mlxsw_sp_router_fini(mlxsw_sp);
3279 err_router_init:
3280         mlxsw_sp_acl_fini(mlxsw_sp);
3281 err_acl_init:
3282         mlxsw_sp_nve_fini(mlxsw_sp);
3283 err_nve_init:
3284         mlxsw_sp_ipv6_addr_ht_fini(mlxsw_sp);
3285 err_ipv6_addr_ht_init:
3286         mlxsw_sp_afa_fini(mlxsw_sp);
3287 err_afa_init:
3288         mlxsw_sp_counter_pool_fini(mlxsw_sp);
3289 err_counter_pool_init:
3290         mlxsw_sp_switchdev_fini(mlxsw_sp);
3291 err_switchdev_init:
3292         mlxsw_sp_span_fini(mlxsw_sp);
3293 err_span_init:
3294         mlxsw_sp_lag_fini(mlxsw_sp);
3295 err_lag_init:
3296         mlxsw_sp_buffers_fini(mlxsw_sp);
3297 err_buffers_init:
3298         mlxsw_sp_devlink_traps_fini(mlxsw_sp);
3299 err_devlink_traps_init:
3300         mlxsw_sp_traps_fini(mlxsw_sp);
3301 err_traps_init:
3302         mlxsw_sp_policers_fini(mlxsw_sp);
3303 err_policers_init:
3304         mlxsw_sp_fids_fini(mlxsw_sp);
3305 err_fids_init:
3306         mlxsw_sp_pgt_fini(mlxsw_sp);
3307 err_pgt_init:
3308         mlxsw_sp_kvdl_fini(mlxsw_sp);
3309         mlxsw_sp_parsing_fini(mlxsw_sp);
3310         return err;
3311 }
3312
3313 static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core,
3314                           const struct mlxsw_bus_info *mlxsw_bus_info,
3315                           struct netlink_ext_ack *extack)
3316 {
3317         struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
3318
3319         mlxsw_sp->switchdev_ops = &mlxsw_sp1_switchdev_ops;
3320         mlxsw_sp->kvdl_ops = &mlxsw_sp1_kvdl_ops;
3321         mlxsw_sp->afa_ops = &mlxsw_sp1_act_afa_ops;
3322         mlxsw_sp->afk_ops = &mlxsw_sp1_afk_ops;
3323         mlxsw_sp->mr_tcam_ops = &mlxsw_sp1_mr_tcam_ops;
3324         mlxsw_sp->acl_rulei_ops = &mlxsw_sp1_acl_rulei_ops;
3325         mlxsw_sp->acl_tcam_ops = &mlxsw_sp1_acl_tcam_ops;
3326         mlxsw_sp->nve_ops_arr = mlxsw_sp1_nve_ops_arr;
3327         mlxsw_sp->mac_mask = mlxsw_sp1_mac_mask;
3328         mlxsw_sp->sb_vals = &mlxsw_sp1_sb_vals;
3329         mlxsw_sp->sb_ops = &mlxsw_sp1_sb_ops;
3330         mlxsw_sp->port_type_speed_ops = &mlxsw_sp1_port_type_speed_ops;
3331         mlxsw_sp->ptp_ops = &mlxsw_sp1_ptp_ops;
3332         mlxsw_sp->span_ops = &mlxsw_sp1_span_ops;
3333         mlxsw_sp->policer_core_ops = &mlxsw_sp1_policer_core_ops;
3334         mlxsw_sp->trap_ops = &mlxsw_sp1_trap_ops;
3335         mlxsw_sp->mall_ops = &mlxsw_sp1_mall_ops;
3336         mlxsw_sp->router_ops = &mlxsw_sp1_router_ops;
3337         mlxsw_sp->listeners = mlxsw_sp1_listener;
3338         mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp1_listener);
3339         mlxsw_sp->fid_family_arr = mlxsw_sp1_fid_family_arr;
3340         mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP1;
3341         mlxsw_sp->pgt_smpe_index_valid = true;
3342
3343         return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
3344 }
3345
3346 static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core,
3347                           const struct mlxsw_bus_info *mlxsw_bus_info,
3348                           struct netlink_ext_ack *extack)
3349 {
3350         struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
3351
3352         mlxsw_sp->switchdev_ops = &mlxsw_sp2_switchdev_ops;
3353         mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
3354         mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
3355         mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
3356         mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
3357         mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops;
3358         mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
3359         mlxsw_sp->acl_bf_ops = &mlxsw_sp2_acl_bf_ops;
3360         mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
3361         mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
3362         mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
3363         mlxsw_sp->sb_ops = &mlxsw_sp2_sb_ops;
3364         mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
3365         mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
3366         mlxsw_sp->span_ops = &mlxsw_sp2_span_ops;
3367         mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops;
3368         mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
3369         mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops;
3370         mlxsw_sp->router_ops = &mlxsw_sp2_router_ops;
3371         mlxsw_sp->listeners = mlxsw_sp2_listener;
3372         mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener);
3373         mlxsw_sp->fid_family_arr = mlxsw_sp2_fid_family_arr;
3374         mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP2;
3375         mlxsw_sp->pgt_smpe_index_valid = false;
3376
3377         return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
3378 }
3379
3380 static int mlxsw_sp3_init(struct mlxsw_core *mlxsw_core,
3381                           const struct mlxsw_bus_info *mlxsw_bus_info,
3382                           struct netlink_ext_ack *extack)
3383 {
3384         struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
3385
3386         mlxsw_sp->switchdev_ops = &mlxsw_sp2_switchdev_ops;
3387         mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
3388         mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
3389         mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
3390         mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
3391         mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops;
3392         mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
3393         mlxsw_sp->acl_bf_ops = &mlxsw_sp2_acl_bf_ops;
3394         mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
3395         mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
3396         mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
3397         mlxsw_sp->sb_ops = &mlxsw_sp3_sb_ops;
3398         mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
3399         mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
3400         mlxsw_sp->span_ops = &mlxsw_sp3_span_ops;
3401         mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops;
3402         mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
3403         mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops;
3404         mlxsw_sp->router_ops = &mlxsw_sp2_router_ops;
3405         mlxsw_sp->listeners = mlxsw_sp2_listener;
3406         mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener);
3407         mlxsw_sp->fid_family_arr = mlxsw_sp2_fid_family_arr;
3408         mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3;
3409         mlxsw_sp->pgt_smpe_index_valid = false;
3410
3411         return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
3412 }
3413
3414 static int mlxsw_sp4_init(struct mlxsw_core *mlxsw_core,
3415                           const struct mlxsw_bus_info *mlxsw_bus_info,
3416                           struct netlink_ext_ack *extack)
3417 {
3418         struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
3419
3420         mlxsw_sp->switchdev_ops = &mlxsw_sp2_switchdev_ops;
3421         mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
3422         mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
3423         mlxsw_sp->afk_ops = &mlxsw_sp4_afk_ops;
3424         mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
3425         mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops;
3426         mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
3427         mlxsw_sp->acl_bf_ops = &mlxsw_sp4_acl_bf_ops;
3428         mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
3429         mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
3430         mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
3431         mlxsw_sp->sb_ops = &mlxsw_sp3_sb_ops;
3432         mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
3433         mlxsw_sp->ptp_ops = &mlxsw_sp4_ptp_ops;
3434         mlxsw_sp->span_ops = &mlxsw_sp3_span_ops;
3435         mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops;
3436         mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
3437         mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops;
3438         mlxsw_sp->router_ops = &mlxsw_sp2_router_ops;
3439         mlxsw_sp->listeners = mlxsw_sp2_listener;
3440         mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener);
3441         mlxsw_sp->fid_family_arr = mlxsw_sp2_fid_family_arr;
3442         mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP4;
3443         mlxsw_sp->pgt_smpe_index_valid = false;
3444
3445         return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
3446 }
3447
/* Tear down everything mlxsw_sp_init() set up, in strict reverse order of
 * initialization. This sequence must be kept in sync with the error-unwind
 * labels of mlxsw_sp_init().
 */
static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp_ports_remove(mlxsw_sp);
	rhashtable_destroy(&mlxsw_sp->sample_trigger_ht);
	mlxsw_sp_port_module_info_fini(mlxsw_sp);
	mlxsw_sp_dpipe_fini(mlxsw_sp);
	/* Unregister the notifier before dismantling SPAN and the router,
	 * since the event handler may invoke both (see mlxsw_sp_init()).
	 */
	unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
					  &mlxsw_sp->netdevice_nb);
	/* PTP state and clock exist only if a clock was instantiated during
	 * init (read_clock_capable bus).
	 */
	if (mlxsw_sp->clock) {
		mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state);
		mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock);
	}
	mlxsw_sp_router_fini(mlxsw_sp);
	mlxsw_sp_acl_fini(mlxsw_sp);
	mlxsw_sp_nve_fini(mlxsw_sp);
	mlxsw_sp_ipv6_addr_ht_fini(mlxsw_sp);
	mlxsw_sp_afa_fini(mlxsw_sp);
	mlxsw_sp_counter_pool_fini(mlxsw_sp);
	mlxsw_sp_switchdev_fini(mlxsw_sp);
	mlxsw_sp_span_fini(mlxsw_sp);
	mlxsw_sp_lag_fini(mlxsw_sp);
	mlxsw_sp_buffers_fini(mlxsw_sp);
	mlxsw_sp_devlink_traps_fini(mlxsw_sp);
	mlxsw_sp_traps_fini(mlxsw_sp);
	mlxsw_sp_policers_fini(mlxsw_sp);
	mlxsw_sp_fids_fini(mlxsw_sp);
	mlxsw_sp_pgt_fini(mlxsw_sp);
	mlxsw_sp_kvdl_fini(mlxsw_sp);
	mlxsw_sp_parsing_fini(mlxsw_sp);
}
3480
/* Config profile handed to the core for Spectrum-1: controlled flood mode,
 * no InfiniBand multicast/pkey resources, unified bridge model, a single
 * Ethernet SWID, and an explicit KVD split between the linear part and the
 * single/double hash parts (59:41 ratio).
 */
static const struct mlxsw_config_profile mlxsw_sp1_config_profile = {
	.used_flood_mode		= 1,
	.flood_mode			= MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_CONTROLLED,
	.used_max_ib_mc			= 1,
	.max_ib_mc			= 0,
	.used_max_pkey			= 1,
	.max_pkey			= 0,
	.used_ubridge			= 1,
	.ubridge			= 1,
	.used_kvd_sizes			= 1,
	.kvd_hash_single_parts		= 59,
	.kvd_hash_double_parts		= 41,
	.kvd_linear_size		= MLXSW_SP_KVD_LINEAR_SIZE,
	.swid_config			= {
		{
			.used_type	= 1,
			.type		= MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
};
3501
/* Config profile for Spectrum-2: same flood/IB/bridge/SWID settings as
 * Spectrum-1, but without an explicit KVD partitioning, and with CQE time
 * stamps requested in UTC format.
 */
static const struct mlxsw_config_profile mlxsw_sp2_config_profile = {
	.used_flood_mode		= 1,
	.flood_mode			= MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_CONTROLLED,
	.used_max_ib_mc			= 1,
	.max_ib_mc			= 0,
	.used_max_pkey			= 1,
	.max_pkey			= 0,
	.used_ubridge			= 1,
	.ubridge			= 1,
	.swid_config			= {
		{
			.used_type	= 1,
			.type		= MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
	.used_cqe_time_stamp_type	= 1,
	.cqe_time_stamp_type		= MLXSW_CMD_MBOX_CONFIG_PROFILE_CQE_TIME_STAMP_TYPE_UTC,
};
3520
3521 /* Reduce number of LAGs from full capacity (256) to the maximum supported LAGs
3522  * in Spectrum-2/3, to avoid regression in number of free entries in the PGT
3523  * table.
3524  */
3525 #define MLXSW_SP4_CONFIG_PROFILE_MAX_LAG 128
3526
/* Config profile for Spectrum-4: identical to the Spectrum-2 profile except
 * that the maximum number of LAGs is capped (see the comment above
 * MLXSW_SP4_CONFIG_PROFILE_MAX_LAG).
 */
static const struct mlxsw_config_profile mlxsw_sp4_config_profile = {
	.used_max_lag			= 1,
	.max_lag			= MLXSW_SP4_CONFIG_PROFILE_MAX_LAG,
	.used_flood_mode		= 1,
	.flood_mode			= MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_CONTROLLED,
	.used_max_ib_mc			= 1,
	.max_ib_mc			= 0,
	.used_max_pkey			= 1,
	.max_pkey			= 0,
	.used_ubridge			= 1,
	.ubridge			= 1,
	.swid_config			= {
		{
			.used_type	= 1,
			.type		= MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
	.used_cqe_time_stamp_type	= 1,
	.cqe_time_stamp_type		= MLXSW_CMD_MBOX_CONFIG_PROFILE_CQE_TIME_STAMP_TYPE_UTC,
};
3547
3548 static void
3549 mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core,
3550                                       struct devlink_resource_size_params *kvd_size_params,
3551                                       struct devlink_resource_size_params *linear_size_params,
3552                                       struct devlink_resource_size_params *hash_double_size_params,
3553                                       struct devlink_resource_size_params *hash_single_size_params)
3554 {
3555         u32 single_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
3556                                                  KVD_SINGLE_MIN_SIZE);
3557         u32 double_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
3558                                                  KVD_DOUBLE_MIN_SIZE);
3559         u32 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
3560         u32 linear_size_min = 0;
3561
3562         devlink_resource_size_params_init(kvd_size_params, kvd_size, kvd_size,
3563                                           MLXSW_SP_KVD_GRANULARITY,
3564                                           DEVLINK_RESOURCE_UNIT_ENTRY);
3565         devlink_resource_size_params_init(linear_size_params, linear_size_min,
3566                                           kvd_size - single_size_min -
3567                                           double_size_min,
3568                                           MLXSW_SP_KVD_GRANULARITY,
3569                                           DEVLINK_RESOURCE_UNIT_ENTRY);
3570         devlink_resource_size_params_init(hash_double_size_params,
3571                                           double_size_min,
3572                                           kvd_size - single_size_min -
3573                                           linear_size_min,
3574                                           MLXSW_SP_KVD_GRANULARITY,
3575                                           DEVLINK_RESOURCE_UNIT_ENTRY);
3576         devlink_resource_size_params_init(hash_single_size_params,
3577                                           single_size_min,
3578                                           kvd_size - double_size_min -
3579                                           linear_size_min,
3580                                           MLXSW_SP_KVD_GRANULARITY,
3581                                           DEVLINK_RESOURCE_UNIT_ENTRY);
3582 }
3583
/* Register the Spectrum-1 KVD devlink resources: the top-level KVD plus its
 * linear, hash-double and hash-single partitions. On any failure the partial
 * registrations are cleaned up by the caller via devl_resources_unregister()
 * (see mlxsw_sp1_resources_register()).
 */
static int mlxsw_sp1_resources_kvd_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct devlink_resource_size_params hash_single_size_params;
	struct devlink_resource_size_params hash_double_size_params;
	struct devlink_resource_size_params linear_size_params;
	struct devlink_resource_size_params kvd_size_params;
	u32 kvd_size, single_size, double_size, linear_size;
	const struct mlxsw_config_profile *profile;
	int err;

	profile = &mlxsw_sp1_config_profile;
	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
		return -EIO;

	mlxsw_sp_resource_size_params_prepare(mlxsw_core, &kvd_size_params,
					      &linear_size_params,
					      &hash_double_size_params,
					      &hash_single_size_params);

	kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
	err = devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
				     kvd_size, MLXSW_SP_RESOURCE_KVD,
				     DEVLINK_RESOURCE_ID_PARENT_TOP,
				     &kvd_size_params);
	if (err)
		return err;

	/* The linear partition size comes straight from the profile. */
	linear_size = profile->kvd_linear_size;
	err = devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR,
				     linear_size,
				     MLXSW_SP_RESOURCE_KVD_LINEAR,
				     MLXSW_SP_RESOURCE_KVD,
				     &linear_size_params);
	if (err)
		return err;

	err = mlxsw_sp1_kvdl_resources_register(mlxsw_core);
	if  (err)
		return err;

	/* Split what remains after the linear partition between the double
	 * and single hash partitions according to the profile's parts ratio,
	 * rounding the double part down to the KVD granularity.
	 */
	double_size = kvd_size - linear_size;
	double_size *= profile->kvd_hash_double_parts;
	double_size /= profile->kvd_hash_double_parts +
		       profile->kvd_hash_single_parts;
	double_size = rounddown(double_size, MLXSW_SP_KVD_GRANULARITY);
	err = devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE,
				     double_size,
				     MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
				     MLXSW_SP_RESOURCE_KVD,
				     &hash_double_size_params);
	if (err)
		return err;

	/* The single hash partition takes whatever is left over. */
	single_size = kvd_size - double_size - linear_size;
	err = devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE,
				     single_size,
				     MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
				     MLXSW_SP_RESOURCE_KVD,
				     &hash_single_size_params);
	if (err)
		return err;

	return 0;
}
3649
3650 static int mlxsw_sp2_resources_kvd_register(struct mlxsw_core *mlxsw_core)
3651 {
3652         struct devlink *devlink = priv_to_devlink(mlxsw_core);
3653         struct devlink_resource_size_params kvd_size_params;
3654         u32 kvd_size;
3655
3656         if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
3657                 return -EIO;
3658
3659         kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
3660         devlink_resource_size_params_init(&kvd_size_params, kvd_size, kvd_size,
3661                                           MLXSW_SP_KVD_GRANULARITY,
3662                                           DEVLINK_RESOURCE_UNIT_ENTRY);
3663
3664         return devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
3665                                       kvd_size, MLXSW_SP_RESOURCE_KVD,
3666                                       DEVLINK_RESOURCE_ID_PARENT_TOP,
3667                                       &kvd_size_params);
3668 }
3669
3670 static int mlxsw_sp_resources_span_register(struct mlxsw_core *mlxsw_core)
3671 {
3672         struct devlink *devlink = priv_to_devlink(mlxsw_core);
3673         struct devlink_resource_size_params span_size_params;
3674         u32 max_span;
3675
3676         if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_SPAN))
3677                 return -EIO;
3678
3679         max_span = MLXSW_CORE_RES_GET(mlxsw_core, MAX_SPAN);
3680         devlink_resource_size_params_init(&span_size_params, max_span, max_span,
3681                                           1, DEVLINK_RESOURCE_UNIT_ENTRY);
3682
3683         return devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_SPAN,
3684                                       max_span, MLXSW_SP_RESOURCE_SPAN,
3685                                       DEVLINK_RESOURCE_ID_PARENT_TOP,
3686                                       &span_size_params);
3687 }
3688
3689 static int
3690 mlxsw_sp_resources_rif_mac_profile_register(struct mlxsw_core *mlxsw_core)
3691 {
3692         struct devlink *devlink = priv_to_devlink(mlxsw_core);
3693         struct devlink_resource_size_params size_params;
3694         u8 max_rif_mac_profiles;
3695
3696         if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_RIF_MAC_PROFILES))
3697                 max_rif_mac_profiles = 1;
3698         else
3699                 max_rif_mac_profiles = MLXSW_CORE_RES_GET(mlxsw_core,
3700                                                           MAX_RIF_MAC_PROFILES);
3701         devlink_resource_size_params_init(&size_params, max_rif_mac_profiles,
3702                                           max_rif_mac_profiles, 1,
3703                                           DEVLINK_RESOURCE_UNIT_ENTRY);
3704
3705         return devl_resource_register(devlink,
3706                                       "rif_mac_profiles",
3707                                       max_rif_mac_profiles,
3708                                       MLXSW_SP_RESOURCE_RIF_MAC_PROFILES,
3709                                       DEVLINK_RESOURCE_ID_PARENT_TOP,
3710                                       &size_params);
3711 }
3712
3713 static int mlxsw_sp_resources_rifs_register(struct mlxsw_core *mlxsw_core)
3714 {
3715         struct devlink *devlink = priv_to_devlink(mlxsw_core);
3716         struct devlink_resource_size_params size_params;
3717         u64 max_rifs;
3718
3719         if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_RIFS))
3720                 return -EIO;
3721
3722         max_rifs = MLXSW_CORE_RES_GET(mlxsw_core, MAX_RIFS);
3723         devlink_resource_size_params_init(&size_params, max_rifs, max_rifs,
3724                                           1, DEVLINK_RESOURCE_UNIT_ENTRY);
3725
3726         return devl_resource_register(devlink, "rifs", max_rifs,
3727                                       MLXSW_SP_RESOURCE_RIFS,
3728                                       DEVLINK_RESOURCE_ID_PARENT_TOP,
3729                                       &size_params);
3730 }
3731
/* Register every Spectrum-1 devlink resource. On any failure after the
 * first registration, devl_resources_unregister() tears down all resources
 * registered so far, so a single cleanup label suffices.
 */
static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core)
{
	int err;

	err = mlxsw_sp1_resources_kvd_register(mlxsw_core);
	if (err)
		return err;

	err = mlxsw_sp_resources_span_register(mlxsw_core);
	if (err)
		goto err_unregister;

	err = mlxsw_sp_counter_resources_register(mlxsw_core);
	if (err)
		goto err_unregister;

	err = mlxsw_sp_policer_resources_register(mlxsw_core);
	if (err)
		goto err_unregister;

	err = mlxsw_sp_resources_rif_mac_profile_register(mlxsw_core);
	if (err)
		goto err_unregister;

	err = mlxsw_sp_resources_rifs_register(mlxsw_core);
	if (err)
		goto err_unregister;

	return 0;

err_unregister:
	devl_resources_unregister(priv_to_devlink(mlxsw_core));
	return err;
}
3770
/* Register every Spectrum-2 devlink resource. On any failure after the
 * first registration, devl_resources_unregister() tears down all resources
 * registered so far, so a single cleanup label suffices.
 */
static int mlxsw_sp2_resources_register(struct mlxsw_core *mlxsw_core)
{
	int err;

	err = mlxsw_sp2_resources_kvd_register(mlxsw_core);
	if (err)
		return err;

	err = mlxsw_sp_resources_span_register(mlxsw_core);
	if (err)
		goto err_unregister;

	err = mlxsw_sp_counter_resources_register(mlxsw_core);
	if (err)
		goto err_unregister;

	err = mlxsw_sp_policer_resources_register(mlxsw_core);
	if (err)
		goto err_unregister;

	err = mlxsw_sp_resources_rif_mac_profile_register(mlxsw_core);
	if (err)
		goto err_unregister;

	err = mlxsw_sp_resources_rifs_register(mlxsw_core);
	if (err)
		goto err_unregister;

	return 0;

err_unregister:
	devl_resources_unregister(priv_to_devlink(mlxsw_core));
	return err;
}
3809
/* Determine the KVD partition sizes to use. User-provided sizes are read
 * through devlink; for any partition the user did not configure, the size
 * is derived from the config profile. Returns 0 on success, or -EIO when
 * the required device resources are missing or the resulting sizes violate
 * the device minima.
 */
static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
                                  const struct mlxsw_config_profile *profile,
                                  u64 *p_single_size, u64 *p_double_size,
                                  u64 *p_linear_size)
{
        struct devlink *devlink = priv_to_devlink(mlxsw_core);
        u32 double_size;
        int err;

        if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
            !MLXSW_CORE_RES_VALID(mlxsw_core, KVD_DOUBLE_MIN_SIZE))
                return -EIO;

        /* The hash part is what left of the kvd without the
         * linear part. It is split to the single size and
         * double size by the parts ratio from the profile.
         * Both sizes must be a multiplications of the
         * granularity from the profile. In case the user
         * provided the sizes they are obtained via devlink.
         */
        err = devl_resource_size_get(devlink,
                                     MLXSW_SP_RESOURCE_KVD_LINEAR,
                                     p_linear_size);
        if (err)
                *p_linear_size = profile->kvd_linear_size;

        err = devl_resource_size_get(devlink,
                                     MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
                                     p_double_size);
        if (err) {
                /* Fall back to the profile's double:single parts ratio,
                 * rounded down to the KVD granularity.
                 */
                double_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
                              *p_linear_size;
                double_size *= profile->kvd_hash_double_parts;
                double_size /= profile->kvd_hash_double_parts +
                               profile->kvd_hash_single_parts;
                *p_double_size = rounddown(double_size,
                                           MLXSW_SP_KVD_GRANULARITY);
        }

        err = devl_resource_size_get(devlink,
                                     MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
                                     p_single_size);
        if (err)
                /* The single hash part takes whatever is left over. */
                *p_single_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
                                 *p_double_size - *p_linear_size;

        /* Check results are legal. */
        if (*p_single_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
            *p_double_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE) ||
            MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) < *p_linear_size)
                return -EIO;

        return 0;
}
3864
3865 static int
3866 mlxsw_sp_params_acl_region_rehash_intrvl_get(struct devlink *devlink, u32 id,
3867                                              struct devlink_param_gset_ctx *ctx)
3868 {
3869         struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
3870         struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
3871
3872         ctx->val.vu32 = mlxsw_sp_acl_region_rehash_intrvl_get(mlxsw_sp);
3873         return 0;
3874 }
3875
3876 static int
3877 mlxsw_sp_params_acl_region_rehash_intrvl_set(struct devlink *devlink, u32 id,
3878                                              struct devlink_param_gset_ctx *ctx)
3879 {
3880         struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
3881         struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
3882
3883         return mlxsw_sp_acl_region_rehash_intrvl_set(mlxsw_sp, ctx->val.vu32);
3884 }
3885
/* devlink parameters exposed only on Spectrum-2 and later ASICs:
 * a runtime u32 controlling the ACL region rehash interval.
 */
static const struct devlink_param mlxsw_sp2_devlink_params[] = {
        DEVLINK_PARAM_DRIVER(MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL,
                             "acl_region_rehash_interval",
                             DEVLINK_PARAM_TYPE_U32,
                             BIT(DEVLINK_PARAM_CMODE_RUNTIME),
                             mlxsw_sp_params_acl_region_rehash_intrvl_get,
                             mlxsw_sp_params_acl_region_rehash_intrvl_set,
                             NULL),
};
3895
/* Register the Spectrum-2 specific devlink parameters and initialize
 * the driverinit value of the ACL region rehash interval to 0.
 * Returns 0 on success or the error from devlink_params_register().
 */
static int mlxsw_sp2_params_register(struct mlxsw_core *mlxsw_core)
{
        struct devlink *devlink = priv_to_devlink(mlxsw_core);
        union devlink_param_value value;
        int err;

        err = devlink_params_register(devlink, mlxsw_sp2_devlink_params,
                                      ARRAY_SIZE(mlxsw_sp2_devlink_params));
        if (err)
                return err;

        /* Default rehash interval is 0 until explicitly configured. */
        value.vu32 = 0;
        devlink_param_driverinit_value_set(devlink,
                                           MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL,
                                           value);
        return 0;
}
3913
3914 static void mlxsw_sp2_params_unregister(struct mlxsw_core *mlxsw_core)
3915 {
3916         devlink_params_unregister(priv_to_devlink(mlxsw_core),
3917                                   mlxsw_sp2_devlink_params,
3918                                   ARRAY_SIZE(mlxsw_sp2_devlink_params));
3919 }
3920
/* Core callback invoked for a transmitted skb that needs PTP handling.
 * Strips the Tx header and forwards the skb to the per-ASIC PTP
 * implementation's transmitted() hook.
 */
static void mlxsw_sp_ptp_transmitted(struct mlxsw_core *mlxsw_core,
                                     struct sk_buff *skb, u16 local_port)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

        skb_pull(skb, MLXSW_TXHDR_LEN);
        mlxsw_sp->ptp_ops->transmitted(mlxsw_sp, skb, local_port);
}
3929
/* mlxsw core driver callbacks and configuration for Spectrum-1 devices. */
static struct mlxsw_driver mlxsw_sp1_driver = {
        .kind                           = mlxsw_sp1_driver_name,
        .priv_size                      = sizeof(struct mlxsw_sp),
        .fw_req_rev                     = &mlxsw_sp1_fw_rev,
        .fw_filename                    = MLXSW_SP1_FW_FILENAME,
        .init                           = mlxsw_sp1_init,
        .fini                           = mlxsw_sp_fini,
        .port_split                     = mlxsw_sp_port_split,
        .port_unsplit                   = mlxsw_sp_port_unsplit,
        .sb_pool_get                    = mlxsw_sp_sb_pool_get,
        .sb_pool_set                    = mlxsw_sp_sb_pool_set,
        .sb_port_pool_get               = mlxsw_sp_sb_port_pool_get,
        .sb_port_pool_set               = mlxsw_sp_sb_port_pool_set,
        .sb_tc_pool_bind_get            = mlxsw_sp_sb_tc_pool_bind_get,
        .sb_tc_pool_bind_set            = mlxsw_sp_sb_tc_pool_bind_set,
        .sb_occ_snapshot                = mlxsw_sp_sb_occ_snapshot,
        .sb_occ_max_clear               = mlxsw_sp_sb_occ_max_clear,
        .sb_occ_port_pool_get           = mlxsw_sp_sb_occ_port_pool_get,
        .sb_occ_tc_port_bind_get        = mlxsw_sp_sb_occ_tc_port_bind_get,
        .trap_init                      = mlxsw_sp_trap_init,
        .trap_fini                      = mlxsw_sp_trap_fini,
        .trap_action_set                = mlxsw_sp_trap_action_set,
        .trap_group_init                = mlxsw_sp_trap_group_init,
        .trap_group_set                 = mlxsw_sp_trap_group_set,
        .trap_policer_init              = mlxsw_sp_trap_policer_init,
        .trap_policer_fini              = mlxsw_sp_trap_policer_fini,
        .trap_policer_set               = mlxsw_sp_trap_policer_set,
        .trap_policer_counter_get       = mlxsw_sp_trap_policer_counter_get,
        .txhdr_construct                = mlxsw_sp_txhdr_construct,
        .resources_register             = mlxsw_sp1_resources_register,
        .kvd_sizes_get                  = mlxsw_sp_kvd_sizes_get,
        .ptp_transmitted                = mlxsw_sp_ptp_transmitted,
        .txhdr_len                      = MLXSW_TXHDR_LEN,
        .profile                        = &mlxsw_sp1_config_profile,
        .sdq_supports_cqe_v2            = false,
};
3966
/* mlxsw core driver callbacks and configuration for Spectrum-2 devices.
 * Compared to SP1: supports selected port removal, devlink params,
 * CQE v2 on SDQs, and uses SP2 resources/profile instead of KVD sizing.
 */
static struct mlxsw_driver mlxsw_sp2_driver = {
        .kind                           = mlxsw_sp2_driver_name,
        .priv_size                      = sizeof(struct mlxsw_sp),
        .fw_req_rev                     = &mlxsw_sp2_fw_rev,
        .fw_filename                    = MLXSW_SP2_FW_FILENAME,
        .init                           = mlxsw_sp2_init,
        .fini                           = mlxsw_sp_fini,
        .port_split                     = mlxsw_sp_port_split,
        .port_unsplit                   = mlxsw_sp_port_unsplit,
        .ports_remove_selected          = mlxsw_sp_ports_remove_selected,
        .sb_pool_get                    = mlxsw_sp_sb_pool_get,
        .sb_pool_set                    = mlxsw_sp_sb_pool_set,
        .sb_port_pool_get               = mlxsw_sp_sb_port_pool_get,
        .sb_port_pool_set               = mlxsw_sp_sb_port_pool_set,
        .sb_tc_pool_bind_get            = mlxsw_sp_sb_tc_pool_bind_get,
        .sb_tc_pool_bind_set            = mlxsw_sp_sb_tc_pool_bind_set,
        .sb_occ_snapshot                = mlxsw_sp_sb_occ_snapshot,
        .sb_occ_max_clear               = mlxsw_sp_sb_occ_max_clear,
        .sb_occ_port_pool_get           = mlxsw_sp_sb_occ_port_pool_get,
        .sb_occ_tc_port_bind_get        = mlxsw_sp_sb_occ_tc_port_bind_get,
        .trap_init                      = mlxsw_sp_trap_init,
        .trap_fini                      = mlxsw_sp_trap_fini,
        .trap_action_set                = mlxsw_sp_trap_action_set,
        .trap_group_init                = mlxsw_sp_trap_group_init,
        .trap_group_set                 = mlxsw_sp_trap_group_set,
        .trap_policer_init              = mlxsw_sp_trap_policer_init,
        .trap_policer_fini              = mlxsw_sp_trap_policer_fini,
        .trap_policer_set               = mlxsw_sp_trap_policer_set,
        .trap_policer_counter_get       = mlxsw_sp_trap_policer_counter_get,
        .txhdr_construct                = mlxsw_sp_txhdr_construct,
        .resources_register             = mlxsw_sp2_resources_register,
        .params_register                = mlxsw_sp2_params_register,
        .params_unregister              = mlxsw_sp2_params_unregister,
        .ptp_transmitted                = mlxsw_sp_ptp_transmitted,
        .txhdr_len                      = MLXSW_TXHDR_LEN,
        .profile                        = &mlxsw_sp2_config_profile,
        .sdq_supports_cqe_v2            = true,
};
4005
/* mlxsw core driver callbacks and configuration for Spectrum-3 devices.
 * Shares resources, params and config profile with SP2; differs in
 * init hook and firmware revision/filename.
 */
static struct mlxsw_driver mlxsw_sp3_driver = {
        .kind                           = mlxsw_sp3_driver_name,
        .priv_size                      = sizeof(struct mlxsw_sp),
        .fw_req_rev                     = &mlxsw_sp3_fw_rev,
        .fw_filename                    = MLXSW_SP3_FW_FILENAME,
        .init                           = mlxsw_sp3_init,
        .fini                           = mlxsw_sp_fini,
        .port_split                     = mlxsw_sp_port_split,
        .port_unsplit                   = mlxsw_sp_port_unsplit,
        .ports_remove_selected          = mlxsw_sp_ports_remove_selected,
        .sb_pool_get                    = mlxsw_sp_sb_pool_get,
        .sb_pool_set                    = mlxsw_sp_sb_pool_set,
        .sb_port_pool_get               = mlxsw_sp_sb_port_pool_get,
        .sb_port_pool_set               = mlxsw_sp_sb_port_pool_set,
        .sb_tc_pool_bind_get            = mlxsw_sp_sb_tc_pool_bind_get,
        .sb_tc_pool_bind_set            = mlxsw_sp_sb_tc_pool_bind_set,
        .sb_occ_snapshot                = mlxsw_sp_sb_occ_snapshot,
        .sb_occ_max_clear               = mlxsw_sp_sb_occ_max_clear,
        .sb_occ_port_pool_get           = mlxsw_sp_sb_occ_port_pool_get,
        .sb_occ_tc_port_bind_get        = mlxsw_sp_sb_occ_tc_port_bind_get,
        .trap_init                      = mlxsw_sp_trap_init,
        .trap_fini                      = mlxsw_sp_trap_fini,
        .trap_action_set                = mlxsw_sp_trap_action_set,
        .trap_group_init                = mlxsw_sp_trap_group_init,
        .trap_group_set                 = mlxsw_sp_trap_group_set,
        .trap_policer_init              = mlxsw_sp_trap_policer_init,
        .trap_policer_fini              = mlxsw_sp_trap_policer_fini,
        .trap_policer_set               = mlxsw_sp_trap_policer_set,
        .trap_policer_counter_get       = mlxsw_sp_trap_policer_counter_get,
        .txhdr_construct                = mlxsw_sp_txhdr_construct,
        .resources_register             = mlxsw_sp2_resources_register,
        .params_register                = mlxsw_sp2_params_register,
        .params_unregister              = mlxsw_sp2_params_unregister,
        .ptp_transmitted                = mlxsw_sp_ptp_transmitted,
        .txhdr_len                      = MLXSW_TXHDR_LEN,
        .profile                        = &mlxsw_sp2_config_profile,
        .sdq_supports_cqe_v2            = true,
};
4044
/* mlxsw core driver callbacks and configuration for Spectrum-4 devices.
 * NOTE(review): unlike SP1-SP3, no .fw_req_rev/.fw_filename are set here
 * - presumably firmware validation/flashing is handled differently for
 * SP4; confirm against mlxsw_sp4_init and the core firmware logic.
 */
static struct mlxsw_driver mlxsw_sp4_driver = {
        .kind                           = mlxsw_sp4_driver_name,
        .priv_size                      = sizeof(struct mlxsw_sp),
        .init                           = mlxsw_sp4_init,
        .fini                           = mlxsw_sp_fini,
        .port_split                     = mlxsw_sp_port_split,
        .port_unsplit                   = mlxsw_sp_port_unsplit,
        .ports_remove_selected          = mlxsw_sp_ports_remove_selected,
        .sb_pool_get                    = mlxsw_sp_sb_pool_get,
        .sb_pool_set                    = mlxsw_sp_sb_pool_set,
        .sb_port_pool_get               = mlxsw_sp_sb_port_pool_get,
        .sb_port_pool_set               = mlxsw_sp_sb_port_pool_set,
        .sb_tc_pool_bind_get            = mlxsw_sp_sb_tc_pool_bind_get,
        .sb_tc_pool_bind_set            = mlxsw_sp_sb_tc_pool_bind_set,
        .sb_occ_snapshot                = mlxsw_sp_sb_occ_snapshot,
        .sb_occ_max_clear               = mlxsw_sp_sb_occ_max_clear,
        .sb_occ_port_pool_get           = mlxsw_sp_sb_occ_port_pool_get,
        .sb_occ_tc_port_bind_get        = mlxsw_sp_sb_occ_tc_port_bind_get,
        .trap_init                      = mlxsw_sp_trap_init,
        .trap_fini                      = mlxsw_sp_trap_fini,
        .trap_action_set                = mlxsw_sp_trap_action_set,
        .trap_group_init                = mlxsw_sp_trap_group_init,
        .trap_group_set                 = mlxsw_sp_trap_group_set,
        .trap_policer_init              = mlxsw_sp_trap_policer_init,
        .trap_policer_fini              = mlxsw_sp_trap_policer_fini,
        .trap_policer_set               = mlxsw_sp_trap_policer_set,
        .trap_policer_counter_get       = mlxsw_sp_trap_policer_counter_get,
        .txhdr_construct                = mlxsw_sp_txhdr_construct,
        .resources_register             = mlxsw_sp2_resources_register,
        .params_register                = mlxsw_sp2_params_register,
        .params_unregister              = mlxsw_sp2_params_unregister,
        .ptp_transmitted                = mlxsw_sp_ptp_transmitted,
        .txhdr_len                      = MLXSW_TXHDR_LEN,
        .profile                        = &mlxsw_sp4_config_profile,
        .sdq_supports_cqe_v2            = true,
};
4081
/* Return true if @dev is a mlxsw_sp front-panel port netdev, identified
 * by comparing its netdev_ops pointer.
 */
bool mlxsw_sp_port_dev_check(const struct net_device *dev)
{
        return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
}
4086
4087 static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev,
4088                                    struct netdev_nested_priv *priv)
4089 {
4090         int ret = 0;
4091
4092         if (mlxsw_sp_port_dev_check(lower_dev)) {
4093                 priv->data = (void *)netdev_priv(lower_dev);
4094                 ret = 1;
4095         }
4096
4097         return ret;
4098 }
4099
4100 struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
4101 {
4102         struct netdev_nested_priv priv = {
4103                 .data = NULL,
4104         };
4105
4106         if (mlxsw_sp_port_dev_check(dev))
4107                 return netdev_priv(dev);
4108
4109         netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &priv);
4110
4111         return (struct mlxsw_sp_port *)priv.data;
4112 }
4113
4114 struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
4115 {
4116         struct mlxsw_sp_port *mlxsw_sp_port;
4117
4118         mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
4119         return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL;
4120 }
4121
4122 struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
4123 {
4124         struct netdev_nested_priv priv = {
4125                 .data = NULL,
4126         };
4127
4128         if (mlxsw_sp_port_dev_check(dev))
4129                 return netdev_priv(dev);
4130
4131         netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk,
4132                                       &priv);
4133
4134         return (struct mlxsw_sp_port *)priv.data;
4135 }
4136
/* Find the mlxsw_sp port at or below @dev and take a reference on its
 * netdev. The RCU read lock keeps the lower-device list stable during
 * the search, and dev_hold() is taken before the lock is dropped so the
 * netdev cannot go away. Release with mlxsw_sp_port_dev_put().
 */
struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev)
{
        struct mlxsw_sp_port *mlxsw_sp_port;

        rcu_read_lock();
        mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev);
        if (mlxsw_sp_port)
                dev_hold(mlxsw_sp_port->dev);
        rcu_read_unlock();
        return mlxsw_sp_port;
}
4148
/* Drop the netdev reference taken by mlxsw_sp_port_lower_dev_hold(). */
void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port)
{
        dev_put(mlxsw_sp_port->dev);
}
4153
/* Request the increased packet parsing depth, reference counted. The
 * first caller writes the MPRS register; later callers only take a
 * reference. parsing.lock serializes against concurrent inc/dec and
 * VxLAN dport updates. Returns 0 or a register-write error.
 */
int mlxsw_sp_parsing_depth_inc(struct mlxsw_sp *mlxsw_sp)
{
        char mprs_pl[MLXSW_REG_MPRS_LEN];
        int err = 0;

        mutex_lock(&mlxsw_sp->parsing.lock);

        /* Depth already increased by a previous caller; just take a ref. */
        if (refcount_inc_not_zero(&mlxsw_sp->parsing.parsing_depth_ref))
                goto out_unlock;

        mlxsw_reg_mprs_pack(mprs_pl, MLXSW_SP_INCREASED_PARSING_DEPTH,
                            mlxsw_sp->parsing.vxlan_udp_dport);
        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mprs), mprs_pl);
        if (err)
                goto out_unlock;

        /* Cache the new depth and start the refcount at one. */
        mlxsw_sp->parsing.parsing_depth = MLXSW_SP_INCREASED_PARSING_DEPTH;
        refcount_set(&mlxsw_sp->parsing.parsing_depth_ref, 1);

out_unlock:
        mutex_unlock(&mlxsw_sp->parsing.lock);
        return err;
}
4177
/* Release a reference on the increased parsing depth. When the last
 * reference is dropped, restore the default depth in hardware
 * (best-effort: the register-write result is ignored on this path).
 */
void mlxsw_sp_parsing_depth_dec(struct mlxsw_sp *mlxsw_sp)
{
        char mprs_pl[MLXSW_REG_MPRS_LEN];

        mutex_lock(&mlxsw_sp->parsing.lock);

        /* Other users still need the increased depth. */
        if (!refcount_dec_and_test(&mlxsw_sp->parsing.parsing_depth_ref))
                goto out_unlock;

        mlxsw_reg_mprs_pack(mprs_pl, MLXSW_SP_DEFAULT_PARSING_DEPTH,
                            mlxsw_sp->parsing.vxlan_udp_dport);
        mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mprs), mprs_pl);
        mlxsw_sp->parsing.parsing_depth = MLXSW_SP_DEFAULT_PARSING_DEPTH;

out_unlock:
        mutex_unlock(&mlxsw_sp->parsing.lock);
}
4195
/* Program the VxLAN UDP destination port used by the parser, keeping
 * the currently configured parsing depth. The cached dport is only
 * updated after the register write succeeds. Returns 0 or the
 * register-write error.
 */
int mlxsw_sp_parsing_vxlan_udp_dport_set(struct mlxsw_sp *mlxsw_sp,
                                         __be16 udp_dport)
{
        char mprs_pl[MLXSW_REG_MPRS_LEN];
        int err;

        mutex_lock(&mlxsw_sp->parsing.lock);

        mlxsw_reg_mprs_pack(mprs_pl, mlxsw_sp->parsing.parsing_depth,
                            be16_to_cpu(udp_dport));
        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mprs), mprs_pl);
        if (err)
                goto out_unlock;

        /* Cached in host byte order for subsequent MPRS writes. */
        mlxsw_sp->parsing.vxlan_udp_dport = be16_to_cpu(udp_dport);

out_unlock:
        mutex_unlock(&mlxsw_sp->parsing.lock);
        return err;
}
4216
/* Make @mlxsw_sp_port leave the bridge @lag_dev is a port of (if any),
 * as well as any bridge that an upper device of @lag_dev is a port of.
 * Used when the port leaves the LAG, since its bridge memberships were
 * inherited through the LAG device.
 */
static void
mlxsw_sp_port_lag_uppers_cleanup(struct mlxsw_sp_port *mlxsw_sp_port,
                                 struct net_device *lag_dev)
{
        struct net_device *br_dev = netdev_master_upper_dev_get(lag_dev);
        struct net_device *upper_dev;
        struct list_head *iter;

        if (netif_is_bridge_port(lag_dev))
                mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lag_dev, br_dev);

        netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
                if (!netif_is_bridge_port(upper_dev))
                        continue;
                br_dev = netdev_master_upper_dev_get(upper_dev);
                mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, br_dev);
        }
}
4235
4236 static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
4237 {
4238         char sldr_pl[MLXSW_REG_SLDR_LEN];
4239
4240         mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
4241         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
4242 }
4243
4244 static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
4245 {
4246         char sldr_pl[MLXSW_REG_SLDR_LEN];
4247
4248         mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
4249         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
4250 }
4251
4252 static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
4253                                      u16 lag_id, u8 port_index)
4254 {
4255         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4256         char slcor_pl[MLXSW_REG_SLCOR_LEN];
4257
4258         mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
4259                                       lag_id, port_index);
4260         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
4261 }
4262
4263 static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
4264                                         u16 lag_id)
4265 {
4266         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4267         char slcor_pl[MLXSW_REG_SLCOR_LEN];
4268
4269         mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
4270                                          lag_id);
4271         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
4272 }
4273
4274 static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
4275                                         u16 lag_id)
4276 {
4277         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4278         char slcor_pl[MLXSW_REG_SLCOR_LEN];
4279
4280         mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
4281                                         lag_id);
4282         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
4283 }
4284
4285 static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
4286                                          u16 lag_id)
4287 {
4288         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4289         char slcor_pl[MLXSW_REG_SLCOR_LEN];
4290
4291         mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
4292                                          lag_id);
4293         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
4294 }
4295
4296 static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
4297                                   struct net_device *lag_dev,
4298                                   u16 *p_lag_id)
4299 {
4300         struct mlxsw_sp_upper *lag;
4301         int free_lag_id = -1;
4302         u16 max_lag;
4303         int err, i;
4304
4305         err = mlxsw_core_max_lag(mlxsw_sp->core, &max_lag);
4306         if (err)
4307                 return err;
4308
4309         for (i = 0; i < max_lag; i++) {
4310                 lag = mlxsw_sp_lag_get(mlxsw_sp, i);
4311                 if (lag->ref_count) {
4312                         if (lag->dev == lag_dev) {
4313                                 *p_lag_id = i;
4314                                 return 0;
4315                         }
4316                 } else if (free_lag_id < 0) {
4317                         free_lag_id = i;
4318                 }
4319         }
4320         if (free_lag_id < 0)
4321                 return -EBUSY;
4322         *p_lag_id = free_lag_id;
4323         return 0;
4324 }
4325
4326 static bool
4327 mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
4328                           struct net_device *lag_dev,
4329                           struct netdev_lag_upper_info *lag_upper_info,
4330                           struct netlink_ext_ack *extack)
4331 {
4332         u16 lag_id;
4333
4334         if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0) {
4335                 NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported LAG devices");
4336                 return false;
4337         }
4338         if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
4339                 NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type");
4340                 return false;
4341         }
4342         return true;
4343 }
4344
4345 static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
4346                                        u16 lag_id, u8 *p_port_index)
4347 {
4348         u64 max_lag_members;
4349         int i;
4350
4351         max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
4352                                              MAX_LAG_MEMBERS);
4353         for (i = 0; i < max_lag_members; i++) {
4354                 if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
4355                         *p_port_index = i;
4356                         return 0;
4357                 }
4358         }
4359         return -EBUSY;
4360 }
4361
/* Make @mlxsw_sp_port a member of the LAG represented by @lag_dev:
 * create the LAG in hardware on first use, add the port to its
 * collector, record the core LAG mapping, and join any router interface
 * configured on the LAG. Error paths unwind in exact reverse order;
 * the LAG itself is only destroyed if this port was its first user.
 */
static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
                                  struct net_device *lag_dev,
                                  struct netlink_ext_ack *extack)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        struct mlxsw_sp_upper *lag;
        u16 lag_id;
        u8 port_index;
        int err;

        err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
        if (err)
                return err;
        lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
        /* First member creates the LAG in hardware. */
        if (!lag->ref_count) {
                err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
                if (err)
                        return err;
                lag->dev = lag_dev;
        }

        err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
        if (err)
                return err;
        err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
        if (err)
                goto err_col_port_add;

        mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
                                   mlxsw_sp_port->local_port);
        mlxsw_sp_port->lag_id = lag_id;
        mlxsw_sp_port->lagged = 1;
        lag->ref_count++;

        /* Port is no longer usable as a router interface */
        if (mlxsw_sp_port->default_vlan->fid)
                mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port->default_vlan);

        /* Join a router interface configured on the LAG, if exists */
        err = mlxsw_sp_port_vlan_router_join(mlxsw_sp_port->default_vlan,
                                             lag_dev, extack);
        if (err)
                goto err_router_join;

        return 0;

err_router_join:
        lag->ref_count--;
        mlxsw_sp_port->lagged = 0;
        mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
                                     mlxsw_sp_port->local_port);
        mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
err_col_port_add:
        /* Only destroy the LAG if this port was its first user. */
        if (!lag->ref_count)
                mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
        return err;
}
4419
/* Remove @mlxsw_sp_port from the LAG it is currently a member of:
 * remove it from the collector, flush its VLANs, leave any bridges
 * joined via the LAG, and destroy the LAG in hardware when this was
 * the last member. Finally restore the default PVID so untagged
 * traffic can ingress again.
 */
static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
                                    struct net_device *lag_dev)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        u16 lag_id = mlxsw_sp_port->lag_id;
        struct mlxsw_sp_upper *lag;

        if (!mlxsw_sp_port->lagged)
                return;
        lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
        WARN_ON(lag->ref_count == 0);

        mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);

        /* Any VLANs configured on the port are no longer valid */
        mlxsw_sp_port_vlan_flush(mlxsw_sp_port, false);
        mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port->default_vlan);
        /* Make the LAG and its directly linked uppers leave bridges they
         * are member in
         */
        mlxsw_sp_port_lag_uppers_cleanup(mlxsw_sp_port, lag_dev);

        /* Last member tears the LAG down in hardware. */
        if (lag->ref_count == 1)
                mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);

        mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
                                     mlxsw_sp_port->local_port);
        mlxsw_sp_port->lagged = 0;
        lag->ref_count--;

        /* Make sure untagged frames are allowed to ingress */
        mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID,
                               ETH_P_8021Q);
}
4454
4455 static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
4456                                       u16 lag_id)
4457 {
4458         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4459         char sldr_pl[MLXSW_REG_SLDR_LEN];
4460
4461         mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
4462                                          mlxsw_sp_port->local_port);
4463         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
4464 }
4465
4466 static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
4467                                          u16 lag_id)
4468 {
4469         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4470         char sldr_pl[MLXSW_REG_SLDR_LEN];
4471
4472         mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
4473                                             mlxsw_sp_port->local_port);
4474         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
4475 }
4476
4477 static int
4478 mlxsw_sp_port_lag_col_dist_enable(struct mlxsw_sp_port *mlxsw_sp_port)
4479 {
4480         int err;
4481
4482         err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port,
4483                                            mlxsw_sp_port->lag_id);
4484         if (err)
4485                 return err;
4486
4487         err = mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id);
4488         if (err)
4489                 goto err_dist_port_add;
4490
4491         return 0;
4492
4493 err_dist_port_add:
4494         mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, mlxsw_sp_port->lag_id);
4495         return err;
4496 }
4497
4498 static int
4499 mlxsw_sp_port_lag_col_dist_disable(struct mlxsw_sp_port *mlxsw_sp_port)
4500 {
4501         int err;
4502
4503         err = mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
4504                                             mlxsw_sp_port->lag_id);
4505         if (err)
4506                 return err;
4507
4508         err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port,
4509                                             mlxsw_sp_port->lag_id);
4510         if (err)
4511                 goto err_col_port_disable;
4512
4513         return 0;
4514
4515 err_col_port_disable:
4516         mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id);
4517         return err;
4518 }
4519
4520 static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
4521                                      struct netdev_lag_lower_state_info *info)
4522 {
4523         if (info->tx_enabled)
4524                 return mlxsw_sp_port_lag_col_dist_enable(mlxsw_sp_port);
4525         else
4526                 return mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
4527 }
4528
4529 static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
4530                                  bool enable)
4531 {
4532         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4533         enum mlxsw_reg_spms_state spms_state;
4534         char *spms_pl;
4535         u16 vid;
4536         int err;
4537
4538         spms_state = enable ? MLXSW_REG_SPMS_STATE_FORWARDING :
4539                               MLXSW_REG_SPMS_STATE_DISCARDING;
4540
4541         spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
4542         if (!spms_pl)
4543                 return -ENOMEM;
4544         mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
4545
4546         for (vid = 0; vid < VLAN_N_VID; vid++)
4547                 mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
4548
4549         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
4550         kfree(spms_pl);
4551         return err;
4552 }
4553
/* Prepare a port for enslavement to an OVS master: switch the port to
 * virtual-port (VP) mode, open STP forwarding and add the full VLAN
 * range with learning disabled on every VID. On failure, already
 * applied steps are unwound in reverse order.
 */
static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port)
{
	u16 vid = 1;
	int err;

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
	if (err)
		return err;
	err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true);
	if (err)
		goto err_port_stp_set;
	/* Add VIDs 1..VLAN_N_VID-2 to the port in one go. */
	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2,
				     true, false);
	if (err)
		goto err_port_vlan_set;

	for (; vid <= VLAN_N_VID - 1; vid++) {
		err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
						     vid, false);
		if (err)
			goto err_vid_learning_set;
	}

	return 0;

err_vid_learning_set:
	/* Re-enable learning only on the VIDs already processed. */
	for (vid--; vid >= 1; vid--)
		mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
err_port_vlan_set:
	mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
err_port_stp_set:
	mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
	return err;
}
4588
/* Revert the OVS-specific port configuration applied by
 * mlxsw_sp_port_ovs_join(), undoing each step in reverse order.
 */
static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port)
{
	u16 vid;

	/* Re-enable learning on all VIDs that join() disabled. */
	for (vid = VLAN_N_VID - 1; vid >= 1; vid--)
		mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
					       vid, true);

	mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2,
			       false, false);
	mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
	mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
}
4602
4603 static bool mlxsw_sp_bridge_has_multiple_vxlans(struct net_device *br_dev)
4604 {
4605         unsigned int num_vxlans = 0;
4606         struct net_device *dev;
4607         struct list_head *iter;
4608
4609         netdev_for_each_lower_dev(br_dev, dev, iter) {
4610                 if (netif_is_vxlan(dev))
4611                         num_vxlans++;
4612         }
4613
4614         return num_vxlans > 1;
4615 }
4616
4617 static bool mlxsw_sp_bridge_vxlan_vlan_is_valid(struct net_device *br_dev)
4618 {
4619         DECLARE_BITMAP(vlans, VLAN_N_VID) = {0};
4620         struct net_device *dev;
4621         struct list_head *iter;
4622
4623         netdev_for_each_lower_dev(br_dev, dev, iter) {
4624                 u16 pvid;
4625                 int err;
4626
4627                 if (!netif_is_vxlan(dev))
4628                         continue;
4629
4630                 err = mlxsw_sp_vxlan_mapped_vid(dev, &pvid);
4631                 if (err || !pvid)
4632                         continue;
4633
4634                 if (test_and_set_bit(pvid, vlans))
4635                         return false;
4636         }
4637
4638         return true;
4639 }
4640
4641 static bool mlxsw_sp_bridge_vxlan_is_valid(struct net_device *br_dev,
4642                                            struct netlink_ext_ack *extack)
4643 {
4644         if (br_multicast_enabled(br_dev)) {
4645                 NL_SET_ERR_MSG_MOD(extack, "Multicast can not be enabled on a bridge with a VxLAN device");
4646                 return false;
4647         }
4648
4649         if (!br_vlan_enabled(br_dev) &&
4650             mlxsw_sp_bridge_has_multiple_vxlans(br_dev)) {
4651                 NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices are not supported in a VLAN-unaware bridge");
4652                 return false;
4653         }
4654
4655         if (br_vlan_enabled(br_dev) &&
4656             !mlxsw_sp_bridge_vxlan_vlan_is_valid(br_dev)) {
4657                 NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices cannot have the same VLAN as PVID and egress untagged");
4658                 return false;
4659         }
4660
4661         return true;
4662 }
4663
/* Handle PRECHANGEUPPER / CHANGEUPPER notifications for a front-panel
 * port netdev. PRECHANGEUPPER vetoes unsupported topologies with an
 * extack message; CHANGEUPPER performs the actual join / leave of the
 * upper device (bridge, LAG, OVS master, macvlan or VLAN upper).
 */
static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
					       struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;
	u16 proto;

	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	info = ptr;
	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		/* Reject upper device types the driver cannot offload. */
		if (!is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(upper_dev) &&
		    !netif_is_bridge_master(upper_dev) &&
		    !netif_is_ovs_master(upper_dev) &&
		    !netif_is_macvlan(upper_dev) &&
		    !netif_is_l3_master(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EINVAL;
		}
		/* The remaining checks only constrain linking. */
		if (!info->linking)
			break;
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) &&
		    mlxsw_sp_bridge_has_vxlan(upper_dev) &&
		    !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		if (netdev_has_any_upper_dev(upper_dev) &&
		    (!netif_is_bridge_master(upper_dev) ||
		     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
							  upper_dev))) {
			NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported");
			return -EINVAL;
		}
		if (netif_is_lag_master(upper_dev) &&
		    !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
					       info->upper_info, extack))
			return -EINVAL;
		if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Master device is a LAG master and this device has a VLAN");
			return -EINVAL;
		}
		if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(vlan_dev_real_dev(upper_dev))) {
			NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on a LAG port");
			return -EINVAL;
		}
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_exists(mlxsw_sp, lower_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Master device is an OVS master and this device has a VLAN");
			return -EINVAL;
		}
		if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on an OVS port");
			return -EINVAL;
		}
		if (netif_is_bridge_master(upper_dev)) {
			/* Only 802.1Q and 802.1AD VLAN-aware bridges are
			 * supported.
			 */
			br_vlan_get_proto(upper_dev, &proto);
			if (br_vlan_enabled(upper_dev) &&
			    proto != ETH_P_8021Q && proto != ETH_P_8021AD) {
				NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a bridge with unknown VLAN protocol is not supported");
				return -EOPNOTSUPP;
			}
			if (vlan_uses_dev(lower_dev) &&
			    br_vlan_enabled(upper_dev) &&
			    proto == ETH_P_8021AD) {
				NL_SET_ERR_MSG_MOD(extack, "Enslaving a port that already has a VLAN upper to an 802.1ad bridge is not supported");
				return -EOPNOTSUPP;
			}
		}
		if (netif_is_bridge_port(lower_dev) && is_vlan_dev(upper_dev)) {
			struct net_device *br_dev = netdev_master_upper_dev_get(lower_dev);

			if (br_vlan_enabled(br_dev)) {
				br_vlan_get_proto(br_dev, &proto);
				if (proto == ETH_P_8021AD) {
					NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are not supported on a port enslaved to an 802.1ad bridge");
					return -EOPNOTSUPP;
				}
			}
		}
		if (is_vlan_dev(upper_dev) &&
		    ntohs(vlan_dev_vlan_proto(upper_dev)) != ETH_P_8021Q) {
			NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are only supported with 802.1q VLAN protocol");
			return -EOPNOTSUPP;
		}
		if (is_vlan_dev(upper_dev) && mlxsw_sp_port->security) {
			NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are not supported on a locked port");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								lower_dev,
								upper_dev,
								extack);
			else
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
							   lower_dev,
							   upper_dev);
		} else if (netif_is_lag_master(upper_dev)) {
			if (info->linking) {
				err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
							     upper_dev, extack);
			} else {
				mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
				mlxsw_sp_port_lag_leave(mlxsw_sp_port,
							upper_dev);
			}
		} else if (netif_is_ovs_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_ovs_join(mlxsw_sp_port);
			else
				mlxsw_sp_port_ovs_leave(mlxsw_sp_port);
		} else if (netif_is_macvlan(upper_dev)) {
			if (!info->linking)
				mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		} else if (is_vlan_dev(upper_dev)) {
			struct net_device *br_dev;

			/* A VLAN upper being unlinked while it is still a
			 * bridge port means leaving the bridge.
			 */
			if (!netif_is_bridge_port(upper_dev))
				break;
			if (info->linking)
				break;
			br_dev = netdev_master_upper_dev_get(upper_dev);
			mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev,
						   br_dev);
		}
		break;
	}

	return err;
}
4813
4814 static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
4815                                                unsigned long event, void *ptr)
4816 {
4817         struct netdev_notifier_changelowerstate_info *info;
4818         struct mlxsw_sp_port *mlxsw_sp_port;
4819         int err;
4820
4821         mlxsw_sp_port = netdev_priv(dev);
4822         info = ptr;
4823
4824         switch (event) {
4825         case NETDEV_CHANGELOWERSTATE:
4826                 if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
4827                         err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
4828                                                         info->lower_state_info);
4829                         if (err)
4830                                 netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
4831                 }
4832                 break;
4833         }
4834
4835         return 0;
4836 }
4837
4838 static int mlxsw_sp_netdevice_port_event(struct net_device *lower_dev,
4839                                          struct net_device *port_dev,
4840                                          unsigned long event, void *ptr)
4841 {
4842         switch (event) {
4843         case NETDEV_PRECHANGEUPPER:
4844         case NETDEV_CHANGEUPPER:
4845                 return mlxsw_sp_netdevice_port_upper_event(lower_dev, port_dev,
4846                                                            event, ptr);
4847         case NETDEV_CHANGELOWERSTATE:
4848                 return mlxsw_sp_netdevice_port_lower_event(port_dev, event,
4849                                                            ptr);
4850         }
4851
4852         return 0;
4853 }
4854
4855 static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
4856                                         unsigned long event, void *ptr)
4857 {
4858         struct net_device *dev;
4859         struct list_head *iter;
4860         int ret;
4861
4862         netdev_for_each_lower_dev(lag_dev, dev, iter) {
4863                 if (mlxsw_sp_port_dev_check(dev)) {
4864                         ret = mlxsw_sp_netdevice_port_event(lag_dev, dev, event,
4865                                                             ptr);
4866                         if (ret)
4867                                 return ret;
4868                 }
4869         }
4870
4871         return 0;
4872 }
4873
/* Handle PRECHANGEUPPER / CHANGEUPPER for a VLAN device whose real
 * device is an mlxsw port. PRECHANGEUPPER vetoes unsupported uppers;
 * CHANGEUPPER joins / leaves the bridge or cleans up macvlan state.
 */
static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
					      struct net_device *dev,
					      unsigned long event, void *ptr,
					      u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;
	int err = 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		/* Only bridge, macvlan and VRF uppers are supported. */
		if (!netif_is_bridge_master(upper_dev) &&
		    !netif_is_macvlan(upper_dev) &&
		    !netif_is_l3_master(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EINVAL;
		}
		/* Unlinking is always allowed. */
		if (!info->linking)
			break;
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) &&
		    mlxsw_sp_bridge_has_vxlan(upper_dev) &&
		    !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		if (netdev_has_any_upper_dev(upper_dev) &&
		    (!netif_is_bridge_master(upper_dev) ||
		     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
							  upper_dev))) {
			NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported");
			return -EINVAL;
		}
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_exists(mlxsw_sp, vlan_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								vlan_dev,
								upper_dev,
								extack);
			else
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
							   vlan_dev,
							   upper_dev);
		} else if (netif_is_macvlan(upper_dev)) {
			if (!info->linking)
				mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		}
		break;
	}

	return err;
}
4938
4939 static int mlxsw_sp_netdevice_lag_port_vlan_event(struct net_device *vlan_dev,
4940                                                   struct net_device *lag_dev,
4941                                                   unsigned long event,
4942                                                   void *ptr, u16 vid)
4943 {
4944         struct net_device *dev;
4945         struct list_head *iter;
4946         int ret;
4947
4948         netdev_for_each_lower_dev(lag_dev, dev, iter) {
4949                 if (mlxsw_sp_port_dev_check(dev)) {
4950                         ret = mlxsw_sp_netdevice_port_vlan_event(vlan_dev, dev,
4951                                                                  event, ptr,
4952                                                                  vid);
4953                         if (ret)
4954                                 return ret;
4955                 }
4956         }
4957
4958         return 0;
4959 }
4960
/* Handle PRECHANGEUPPER / CHANGEUPPER for a VLAN device on top of a
 * bridge known to the driver. Only macvlan and VRF uppers are allowed,
 * and macvlan requires an existing router interface on the VLAN device.
 */
static int mlxsw_sp_netdevice_bridge_vlan_event(struct net_device *vlan_dev,
						struct net_device *br_dev,
						unsigned long event, void *ptr,
						u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;

	/* Bridge is not known to the driver; nothing to do. */
	if (!mlxsw_sp)
		return 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!netif_is_macvlan(upper_dev) &&
		    !netif_is_l3_master(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EOPNOTSUPP;
		}
		if (!info->linking)
			break;
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_exists(mlxsw_sp, vlan_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (info->linking)
			break;
		/* Clean up driver state when a macvlan is unlinked. */
		if (netif_is_macvlan(upper_dev))
			mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		break;
	}

	return 0;
}
5003
5004 static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
5005                                          unsigned long event, void *ptr)
5006 {
5007         struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
5008         u16 vid = vlan_dev_vlan_id(vlan_dev);
5009
5010         if (mlxsw_sp_port_dev_check(real_dev))
5011                 return mlxsw_sp_netdevice_port_vlan_event(vlan_dev, real_dev,
5012                                                           event, ptr, vid);
5013         else if (netif_is_lag_master(real_dev))
5014                 return mlxsw_sp_netdevice_lag_port_vlan_event(vlan_dev,
5015                                                               real_dev, event,
5016                                                               ptr, vid);
5017         else if (netif_is_bridge_master(real_dev))
5018                 return mlxsw_sp_netdevice_bridge_vlan_event(vlan_dev, real_dev,
5019                                                             event, ptr, vid);
5020
5021         return 0;
5022 }
5023
/* Handle PRECHANGEUPPER / CHANGEUPPER on a bridge device known to the
 * driver. Allowed uppers are VLAN, macvlan and VRF; 802.1ad bridges may
 * not have uppers at all. On unlinking, tears down the associated RIF
 * or macvlan state.
 */
static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
					   unsigned long event, void *ptr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(br_dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;
	u16 proto;

	/* Bridge is not known to the driver; nothing to do. */
	if (!mlxsw_sp)
		return 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!is_vlan_dev(upper_dev) &&
		    !netif_is_macvlan(upper_dev) &&
		    !netif_is_l3_master(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EOPNOTSUPP;
		}
		if (!info->linking)
			break;
		if (br_vlan_enabled(br_dev)) {
			br_vlan_get_proto(br_dev, &proto);
			if (proto == ETH_P_8021AD) {
				NL_SET_ERR_MSG_MOD(extack, "Upper devices are not supported on top of an 802.1ad bridge");
				return -EOPNOTSUPP;
			}
		}
		if (is_vlan_dev(upper_dev) &&
		    ntohs(vlan_dev_vlan_proto(upper_dev)) != ETH_P_8021Q) {
			NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are only supported with 802.1q VLAN protocol");
			return -EOPNOTSUPP;
		}
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_exists(mlxsw_sp, br_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (info->linking)
			break;
		/* Unlinking: release router resources tied to the upper. */
		if (is_vlan_dev(upper_dev))
			mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, upper_dev);
		if (netif_is_macvlan(upper_dev))
			mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		break;
	}

	return 0;
}
5080
5081 static int mlxsw_sp_netdevice_macvlan_event(struct net_device *macvlan_dev,
5082                                             unsigned long event, void *ptr)
5083 {
5084         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(macvlan_dev);
5085         struct netdev_notifier_changeupper_info *info = ptr;
5086         struct netlink_ext_ack *extack;
5087         struct net_device *upper_dev;
5088
5089         if (!mlxsw_sp || event != NETDEV_PRECHANGEUPPER)
5090                 return 0;
5091
5092         extack = netdev_notifier_info_to_extack(&info->info);
5093         upper_dev = info->upper_dev;
5094
5095         if (!netif_is_l3_master(upper_dev)) {
5096                 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
5097                 return -EOPNOTSUPP;
5098         }
5099
5100         return 0;
5101 }
5102
/* Handle netdev events on a VxLAN device enslaved (or being enslaved)
 * to a bridge known to the driver. CHANGEUPPER joins / leaves the
 * bridge; PRE_UP / DOWN do the same when the device is brought up or
 * down while already enslaved. For a VLAN-aware bridge the join happens
 * later, when a VLAN is mapped to the VNI, so those cases return early.
 */
static int mlxsw_sp_netdevice_vxlan_event(struct mlxsw_sp *mlxsw_sp,
					  struct net_device *dev,
					  unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *cu_info;
	struct netdev_notifier_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;

	extack = netdev_notifier_info_to_extack(info);

	switch (event) {
	case NETDEV_CHANGEUPPER:
		cu_info = container_of(info,
				       struct netdev_notifier_changeupper_info,
				       info);
		upper_dev = cu_info->upper_dev;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		if (!mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		if (cu_info->linking) {
			/* Join happens on NETDEV_PRE_UP when the device is
			 * enslaved while down.
			 */
			if (!netif_running(dev))
				return 0;
			/* When the bridge is VLAN-aware, the VNI of the VxLAN
			 * device needs to be mapped to a VLAN, but at this
			 * point no VLANs are configured on the VxLAN device
			 */
			if (br_vlan_enabled(upper_dev))
				return 0;
			return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev,
							  dev, 0, extack);
		} else {
			/* VLANs were already flushed, which triggered the
			 * necessary cleanup
			 */
			if (br_vlan_enabled(upper_dev))
				return 0;
			mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
		}
		break;
	case NETDEV_PRE_UP:
		upper_dev = netdev_master_upper_dev_get(dev);
		if (!upper_dev)
			return 0;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev, dev, 0,
						  extack);
	case NETDEV_DOWN:
		upper_dev = netdev_master_upper_dev_get(dev);
		if (!upper_dev)
			return 0;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
		break;
	}

	return 0;
}
5170
/* Top-level netdevice notifier callback: invalidate SPAN entries that
 * reference an unregistering netdev, respin SPAN, and dispatch the
 * event to the handler matching the device type. Returns a notifier
 * value encoding the handler's errno.
 */
static int mlxsw_sp_netdevice_event(struct notifier_block *nb,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct mlxsw_sp_span_entry *span_entry;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;

	mlxsw_sp = container_of(nb, struct mlxsw_sp, netdevice_nb);
	if (event == NETDEV_UNREGISTER) {
		span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, dev);
		if (span_entry)
			mlxsw_sp_span_entry_invalidate(mlxsw_sp, span_entry);
	}
	mlxsw_sp_span_respin(mlxsw_sp);

	/* Note: VxLAN is checked first since a VxLAN device is never a
	 * port, LAG, VLAN, bridge or macvlan device.
	 */
	if (netif_is_vxlan(dev))
		err = mlxsw_sp_netdevice_vxlan_event(mlxsw_sp, dev, event, ptr);
	else if (mlxsw_sp_port_dev_check(dev))
		err = mlxsw_sp_netdevice_port_event(dev, dev, event, ptr);
	else if (netif_is_lag_master(dev))
		err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
	else if (is_vlan_dev(dev))
		err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);
	else if (netif_is_bridge_master(dev))
		err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr);
	else if (netif_is_macvlan(dev))
		err = mlxsw_sp_netdevice_macvlan_event(dev, event, ptr);

	return notifier_from_errno(err);
}
5202
5203 static struct notifier_block mlxsw_sp_inetaddr_valid_nb __read_mostly = {
5204         .notifier_call = mlxsw_sp_inetaddr_valid_event,
5205 };
5206
5207 static struct notifier_block mlxsw_sp_inet6addr_valid_nb __read_mostly = {
5208         .notifier_call = mlxsw_sp_inet6addr_valid_event,
5209 };
5210
5211 static const struct pci_device_id mlxsw_sp1_pci_id_table[] = {
5212         {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0},
5213         {0, },
5214 };
5215
5216 static struct pci_driver mlxsw_sp1_pci_driver = {
5217         .name = mlxsw_sp1_driver_name,
5218         .id_table = mlxsw_sp1_pci_id_table,
5219 };
5220
/* PCI IDs the Spectrum-2 driver binds to; zero entry terminates the table. */
static const struct pci_device_id mlxsw_sp2_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM2), 0},
	{0, },
};

/* Spectrum-2 PCI driver stub; probe/remove are presumably wired up by
 * mlxsw_pci_driver_register() — confirm in the mlxsw PCI core.
 */
static struct pci_driver mlxsw_sp2_pci_driver = {
	.name = mlxsw_sp2_driver_name,
	.id_table = mlxsw_sp2_pci_id_table,
};
5230
/* PCI IDs the Spectrum-3 driver binds to; zero entry terminates the table. */
static const struct pci_device_id mlxsw_sp3_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM3), 0},
	{0, },
};

/* Spectrum-3 PCI driver stub; probe/remove are presumably wired up by
 * mlxsw_pci_driver_register() — confirm in the mlxsw PCI core.
 */
static struct pci_driver mlxsw_sp3_pci_driver = {
	.name = mlxsw_sp3_driver_name,
	.id_table = mlxsw_sp3_pci_id_table,
};
5240
/* PCI IDs the Spectrum-4 driver binds to; zero entry terminates the table. */
static const struct pci_device_id mlxsw_sp4_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM4), 0},
	{0, },
};

/* Spectrum-4 PCI driver stub; probe/remove are presumably wired up by
 * mlxsw_pci_driver_register() — confirm in the mlxsw PCI core.
 */
static struct pci_driver mlxsw_sp4_pci_driver = {
	.name = mlxsw_sp4_driver_name,
	.id_table = mlxsw_sp4_pci_id_table,
};
5250
/* Module entry point.
 *
 * Registration order:
 *   1. IPv4/IPv6 address validator notifiers (unconditional; their return
 *      values are not checked here).
 *   2. mlxsw core drivers for Spectrum-1..4.
 *   3. PCI drivers for Spectrum-1..4.
 *
 * On any failure, the steps already completed are undone in strict reverse
 * order through the goto ladder below, and the first error code is
 * returned. Each label undoes the step registered immediately before the
 * one that failed, then falls through to the next label.
 */
static int __init mlxsw_sp_module_init(void)
{
	int err;

	register_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
	register_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);

	err = mlxsw_core_driver_register(&mlxsw_sp1_driver);
	if (err)
		goto err_sp1_core_driver_register;

	err = mlxsw_core_driver_register(&mlxsw_sp2_driver);
	if (err)
		goto err_sp2_core_driver_register;

	err = mlxsw_core_driver_register(&mlxsw_sp3_driver);
	if (err)
		goto err_sp3_core_driver_register;

	err = mlxsw_core_driver_register(&mlxsw_sp4_driver);
	if (err)
		goto err_sp4_core_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp1_pci_driver);
	if (err)
		goto err_sp1_pci_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp2_pci_driver);
	if (err)
		goto err_sp2_pci_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp3_pci_driver);
	if (err)
		goto err_sp3_pci_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp4_pci_driver);
	if (err)
		goto err_sp4_pci_driver_register;

	return 0;

/* Unwind: reverse of the registration sequence above. */
err_sp4_pci_driver_register:
	mlxsw_pci_driver_unregister(&mlxsw_sp3_pci_driver);
err_sp3_pci_driver_register:
	mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
err_sp2_pci_driver_register:
	mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
err_sp1_pci_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp4_driver);
err_sp4_core_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp3_driver);
err_sp3_core_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
err_sp2_core_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
err_sp1_core_driver_register:
	unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
	unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
	return err;
}
5311
/* Module teardown: the exact reverse of mlxsw_sp_module_init() — PCI
 * drivers first (the most recently registered), then the core drivers,
 * then the address validator notifiers.
 */
static void __exit mlxsw_sp_module_exit(void)
{
	mlxsw_pci_driver_unregister(&mlxsw_sp4_pci_driver);
	mlxsw_pci_driver_unregister(&mlxsw_sp3_pci_driver);
	mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
	mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp4_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp3_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
	unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
	unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
}
5325
module_init(mlxsw_sp_module_init);
module_exit(mlxsw_sp_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
/* Expose each generation's PCI ID table so userspace (modprobe via
 * modalias) can autoload this module when a matching device appears.
 */
MODULE_DEVICE_TABLE(pci, mlxsw_sp1_pci_id_table);
MODULE_DEVICE_TABLE(pci, mlxsw_sp2_pci_id_table);
MODULE_DEVICE_TABLE(pci, mlxsw_sp3_pci_id_table);
MODULE_DEVICE_TABLE(pci, mlxsw_sp4_pci_id_table);
/* Firmware images this module may request at runtime; the filename macros
 * are defined earlier in this file.
 */
MODULE_FIRMWARE(MLXSW_SP1_FW_FILENAME);
MODULE_FIRMWARE(MLXSW_SP2_FW_FILENAME);
MODULE_FIRMWARE(MLXSW_SP3_FW_FILENAME);
MODULE_FIRMWARE(MLXSW_SP_LINECARDS_INI_BUNDLE_FILENAME);