65e8407f4646b5c3bb46f6b3af1faaf5983fdf8a
[linux-2.6-microblaze.git] / drivers / net / ethernet / mellanox / mlxsw / spectrum.c
1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
3
4 #include <linux/kernel.h>
5 #include <linux/module.h>
6 #include <linux/types.h>
7 #include <linux/pci.h>
8 #include <linux/netdevice.h>
9 #include <linux/etherdevice.h>
10 #include <linux/ethtool.h>
11 #include <linux/slab.h>
12 #include <linux/device.h>
13 #include <linux/skbuff.h>
14 #include <linux/if_vlan.h>
15 #include <linux/if_bridge.h>
16 #include <linux/workqueue.h>
17 #include <linux/jiffies.h>
18 #include <linux/bitops.h>
19 #include <linux/list.h>
20 #include <linux/notifier.h>
21 #include <linux/dcbnl.h>
22 #include <linux/inetdevice.h>
23 #include <linux/netlink.h>
24 #include <linux/jhash.h>
25 #include <linux/log2.h>
26 #include <net/switchdev.h>
27 #include <net/pkt_cls.h>
28 #include <net/netevent.h>
29 #include <net/addrconf.h>
30
31 #include "spectrum.h"
32 #include "pci.h"
33 #include "core.h"
34 #include "core_env.h"
35 #include "reg.h"
36 #include "port.h"
37 #include "trap.h"
38 #include "txheader.h"
39 #include "spectrum_cnt.h"
40 #include "spectrum_dpipe.h"
41 #include "spectrum_acl_flex_actions.h"
42 #include "spectrum_span.h"
43 #include "spectrum_ptp.h"
44 #include "spectrum_trap.h"
45
/* Firmware revision the driver expects for Spectrum-1 devices.
 * NOTE(review): presumably enforced/flashed during init — confirm against
 * the fw validation path elsewhere in this file.
 */
#define MLXSW_SP1_FWREV_MAJOR 13
#define MLXSW_SP1_FWREV_MINOR 2008
#define MLXSW_SP1_FWREV_SUBMINOR 1310
/* Oldest minor revision from which a firmware reset is possible —
 * TODO confirm exact semantics against the can_reset_minor consumer.
 */
#define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702

static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
        .major = MLXSW_SP1_FWREV_MAJOR,
        .minor = MLXSW_SP1_FWREV_MINOR,
        .subminor = MLXSW_SP1_FWREV_SUBMINOR,
        .can_reset_minor = MLXSW_SP1_FWREV_CAN_RESET_MINOR,
};

/* Firmware image name; encodes the revision above, e.g.
 * "mellanox/mlxsw_spectrum-13.2008.1310.mfa2".
 */
#define MLXSW_SP1_FW_FILENAME \
        "mellanox/mlxsw_spectrum-" __stringify(MLXSW_SP1_FWREV_MAJOR) \
        "." __stringify(MLXSW_SP1_FWREV_MINOR) \
        "." __stringify(MLXSW_SP1_FWREV_SUBMINOR) ".mfa2"

/* Firmware revision expected for Spectrum-2 devices. */
#define MLXSW_SP2_FWREV_MAJOR 29
#define MLXSW_SP2_FWREV_MINOR 2008
#define MLXSW_SP2_FWREV_SUBMINOR 1310

static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = {
        .major = MLXSW_SP2_FWREV_MAJOR,
        .minor = MLXSW_SP2_FWREV_MINOR,
        .subminor = MLXSW_SP2_FWREV_SUBMINOR,
};

#define MLXSW_SP2_FW_FILENAME \
        "mellanox/mlxsw_spectrum2-" __stringify(MLXSW_SP2_FWREV_MAJOR) \
        "." __stringify(MLXSW_SP2_FWREV_MINOR) \
        "." __stringify(MLXSW_SP2_FWREV_SUBMINOR) ".mfa2"

/* Firmware revision expected for Spectrum-3 devices. */
#define MLXSW_SP3_FWREV_MAJOR 30
#define MLXSW_SP3_FWREV_MINOR 2008
#define MLXSW_SP3_FWREV_SUBMINOR 1310

static const struct mlxsw_fw_rev mlxsw_sp3_fw_rev = {
        .major = MLXSW_SP3_FWREV_MAJOR,
        .minor = MLXSW_SP3_FWREV_MINOR,
        .subminor = MLXSW_SP3_FWREV_SUBMINOR,
};

#define MLXSW_SP3_FW_FILENAME \
        "mellanox/mlxsw_spectrum3-" __stringify(MLXSW_SP3_FWREV_MAJOR) \
        "." __stringify(MLXSW_SP3_FWREV_MINOR) \
        "." __stringify(MLXSW_SP3_FWREV_SUBMINOR) ".mfa2"

/* Per-generation driver names as exposed to the mlxsw core. */
static const char mlxsw_sp1_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp2_driver_name[] = "mlxsw_spectrum2";
static const char mlxsw_sp3_driver_name[] = "mlxsw_spectrum3";

/* Per-generation base-MAC masks. NOTE(review): the zeroed low bits are
 * presumably the per-port portion of the address (cf.
 * mlxsw_sp_port_dev_addr_init() adding local_port) — confirm.
 */
static const unsigned char mlxsw_sp1_mac_mask[ETH_ALEN] = {
        0xff, 0xff, 0xff, 0xff, 0xfc, 0x00
};
static const unsigned char mlxsw_sp2_mac_mask[ETH_ALEN] = {
        0xff, 0xff, 0xff, 0xff, 0xf0, 0x00
};
103
/* Tx header field definitions. Each MLXSW_ITEM32() below emits the
 * mlxsw_tx_hdr_<field>_set() helpers used by mlxsw_sp_txhdr_construct()
 * to fill the MLXSW_TXHDR_LEN header pushed in front of every Tx packet.
 */

/* tx_hdr_version
 * Tx header version.
 * Must be set to 1.
 */
MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);

/* tx_hdr_ctl
 * Packet control type.
 * 0 - Ethernet control (e.g. EMADs, LACP)
 * 1 - Ethernet data
 */
MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);

/* tx_hdr_proto
 * Packet protocol type. Must be set to 1 (Ethernet).
 */
MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);

/* tx_hdr_rx_is_router
 * Packet is sent from the router. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);

/* tx_hdr_fid_valid
 * Indicates if the 'fid' field is valid and should be used for
 * forwarding lookup. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);

/* tx_hdr_swid
 * Switch partition ID. Must be set to 0.
 */
MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);

/* tx_hdr_control_tclass
 * Indicates if the packet should use the control TClass and not one
 * of the data TClasses.
 */
MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);

/* tx_hdr_etclass
 * Egress TClass to be used on the egress device on the egress port.
 */
MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);

/* tx_hdr_port_mid
 * Destination local port for unicast packets.
 * Destination multicast ID for multicast packets.
 *
 * Control packets are directed to a specific egress port, while data
 * packets are transmitted through the CPU port (0) into the switch partition,
 * where forwarding rules are applied.
 */
MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);

/* tx_hdr_fid
 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
 * Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);

/* tx_hdr_type
 * 0 - Data packets
 * 6 - Control packets
 */
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
171
172 int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
173                               unsigned int counter_index, u64 *packets,
174                               u64 *bytes)
175 {
176         char mgpc_pl[MLXSW_REG_MGPC_LEN];
177         int err;
178
179         mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_NOP,
180                             MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
181         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
182         if (err)
183                 return err;
184         if (packets)
185                 *packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl);
186         if (bytes)
187                 *bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl);
188         return 0;
189 }
190
191 static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp,
192                                        unsigned int counter_index)
193 {
194         char mgpc_pl[MLXSW_REG_MGPC_LEN];
195
196         mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR,
197                             MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
198         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
199 }
200
201 int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
202                                 unsigned int *p_counter_index)
203 {
204         int err;
205
206         err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
207                                      p_counter_index);
208         if (err)
209                 return err;
210         err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index);
211         if (err)
212                 goto err_counter_clear;
213         return 0;
214
215 err_counter_clear:
216         mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
217                               *p_counter_index);
218         return err;
219 }
220
221 void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
222                                 unsigned int counter_index)
223 {
224          mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
225                                counter_index);
226 }
227
228 static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
229                                      const struct mlxsw_tx_info *tx_info)
230 {
231         char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
232
233         memset(txhdr, 0, MLXSW_TXHDR_LEN);
234
235         mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
236         mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
237         mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
238         mlxsw_tx_hdr_swid_set(txhdr, 0);
239         mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
240         mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
241         mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
242 }
243
244 enum mlxsw_reg_spms_state mlxsw_sp_stp_spms_state(u8 state)
245 {
246         switch (state) {
247         case BR_STATE_FORWARDING:
248                 return MLXSW_REG_SPMS_STATE_FORWARDING;
249         case BR_STATE_LEARNING:
250                 return MLXSW_REG_SPMS_STATE_LEARNING;
251         case BR_STATE_LISTENING:
252         case BR_STATE_DISABLED:
253         case BR_STATE_BLOCKING:
254                 return MLXSW_REG_SPMS_STATE_DISCARDING;
255         default:
256                 BUG();
257         }
258 }
259
260 int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
261                               u8 state)
262 {
263         enum mlxsw_reg_spms_state spms_state = mlxsw_sp_stp_spms_state(state);
264         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
265         char *spms_pl;
266         int err;
267
268         spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
269         if (!spms_pl)
270                 return -ENOMEM;
271         mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
272         mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
273
274         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
275         kfree(spms_pl);
276         return err;
277 }
278
279 static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
280 {
281         char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
282         int err;
283
284         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
285         if (err)
286                 return err;
287         mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
288         return 0;
289 }
290
291 int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
292                                    bool is_up)
293 {
294         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
295         char paos_pl[MLXSW_REG_PAOS_LEN];
296
297         mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
298                             is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
299                             MLXSW_PORT_ADMIN_STATUS_DOWN);
300         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
301 }
302
303 static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
304                                       unsigned char *addr)
305 {
306         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
307         char ppad_pl[MLXSW_REG_PPAD_LEN];
308
309         mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
310         mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
311         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
312 }
313
314 static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
315 {
316         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
317         unsigned char *addr = mlxsw_sp_port->dev->dev_addr;
318
319         ether_addr_copy(addr, mlxsw_sp->base_mac);
320         addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
321         return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
322 }
323
324 static int mlxsw_sp_port_max_mtu_get(struct mlxsw_sp_port *mlxsw_sp_port, int *p_max_mtu)
325 {
326         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
327         char pmtu_pl[MLXSW_REG_PMTU_LEN];
328         int err;
329
330         mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
331         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
332         if (err)
333                 return err;
334
335         *p_max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);
336         return 0;
337 }
338
339 static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
340 {
341         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
342         char pmtu_pl[MLXSW_REG_PMTU_LEN];
343
344         mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
345         if (mtu > mlxsw_sp_port->max_mtu)
346                 return -EINVAL;
347
348         mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
349         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
350 }
351
352 static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
353 {
354         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
355         char pspa_pl[MLXSW_REG_PSPA_LEN];
356
357         mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port);
358         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
359 }
360
361 int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
362 {
363         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
364         char svpe_pl[MLXSW_REG_SVPE_LEN];
365
366         mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
367         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
368 }
369
370 int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
371                                    bool learn_enable)
372 {
373         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
374         char *spvmlr_pl;
375         int err;
376
377         spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
378         if (!spvmlr_pl)
379                 return -ENOMEM;
380         mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
381                               learn_enable);
382         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
383         kfree(spvmlr_pl);
384         return err;
385 }
386
387 static int mlxsw_sp_ethtype_to_sver_type(u16 ethtype, u8 *p_sver_type)
388 {
389         switch (ethtype) {
390         case ETH_P_8021Q:
391                 *p_sver_type = 0;
392                 break;
393         case ETH_P_8021AD:
394                 *p_sver_type = 1;
395                 break;
396         default:
397                 return -EINVAL;
398         }
399
400         return 0;
401 }
402
403 static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
404                                     u16 vid, u16 ethtype)
405 {
406         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
407         char spvid_pl[MLXSW_REG_SPVID_LEN];
408         u8 sver_type;
409         int err;
410
411         err = mlxsw_sp_ethtype_to_sver_type(ethtype, &sver_type);
412         if (err)
413                 return err;
414
415         mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid,
416                              sver_type);
417
418         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
419 }
420
421 static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port,
422                                             bool allow)
423 {
424         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
425         char spaft_pl[MLXSW_REG_SPAFT_LEN];
426
427         mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow);
428         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl);
429 }
430
431 int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
432                            u16 ethtype)
433 {
434         int err;
435
436         if (!vid) {
437                 err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false);
438                 if (err)
439                         return err;
440         } else {
441                 err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid, ethtype);
442                 if (err)
443                         return err;
444                 err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, true);
445                 if (err)
446                         goto err_port_allow_untagged_set;
447         }
448
449         mlxsw_sp_port->pvid = vid;
450         return 0;
451
452 err_port_allow_untagged_set:
453         __mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid, ethtype);
454         return err;
455 }
456
457 static int
458 mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
459 {
460         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
461         char sspr_pl[MLXSW_REG_SSPR_LEN];
462
463         mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
464         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
465 }
466
/* Read and validate the port-to-module mapping of @local_port from the
 * PMLP register. Configurations the driver cannot handle are rejected:
 * a lane width that is not a power of 2, a port spanning multiple
 * modules, differing RX/TX lanes when separate_rxtx is set, and
 * non-sequential lane numbers. On success @port_mapping is filled with
 * the module, width and first TX lane.
 */
static int
mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u8 local_port,
                              struct mlxsw_sp_port_mapping *port_mapping)
{
        char pmlp_pl[MLXSW_REG_PMLP_LEN];
        bool separate_rxtx;
        u8 module;
        u8 width;
        int err;
        int i;

        mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
        err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
        if (err)
                return err;
        /* The first lane's module is the reference the others must match. */
        module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
        width = mlxsw_reg_pmlp_width_get(pmlp_pl);
        separate_rxtx = mlxsw_reg_pmlp_rxtx_get(pmlp_pl);

        /* width == 0 (unmapped port) is allowed and skips the loop below. */
        if (width && !is_power_of_2(width)) {
                dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: width value is not power of 2\n",
                        local_port);
                return -EINVAL;
        }

        for (i = 0; i < width; i++) {
                if (mlxsw_reg_pmlp_module_get(pmlp_pl, i) != module) {
                        dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: contains multiple modules\n",
                                local_port);
                        return -EINVAL;
                }
                if (separate_rxtx &&
                    mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) !=
                    mlxsw_reg_pmlp_rx_lane_get(pmlp_pl, i)) {
                        dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are different\n",
                                local_port);
                        return -EINVAL;
                }
                if (mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) != i) {
                        dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are not sequential\n",
                                local_port);
                        return -EINVAL;
                }
        }

        port_mapping->module = module;
        port_mapping->width = width;
        port_mapping->lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
        return 0;
}
517
518 static int mlxsw_sp_port_module_map(struct mlxsw_sp_port *mlxsw_sp_port)
519 {
520         struct mlxsw_sp_port_mapping *port_mapping = &mlxsw_sp_port->mapping;
521         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
522         char pmlp_pl[MLXSW_REG_PMLP_LEN];
523         int i;
524
525         mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
526         mlxsw_reg_pmlp_width_set(pmlp_pl, port_mapping->width);
527         for (i = 0; i < port_mapping->width; i++) {
528                 mlxsw_reg_pmlp_module_set(pmlp_pl, i, port_mapping->module);
529                 mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, port_mapping->lane + i); /* Rx & Tx */
530         }
531
532         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
533 }
534
535 static int mlxsw_sp_port_module_unmap(struct mlxsw_sp_port *mlxsw_sp_port)
536 {
537         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
538         char pmlp_pl[MLXSW_REG_PMLP_LEN];
539
540         mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
541         mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
542         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
543 }
544
545 static int mlxsw_sp_port_open(struct net_device *dev)
546 {
547         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
548         int err;
549
550         err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
551         if (err)
552                 return err;
553         netif_start_queue(dev);
554         return 0;
555 }
556
557 static int mlxsw_sp_port_stop(struct net_device *dev)
558 {
559         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
560
561         netif_stop_queue(dev);
562         return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
563 }
564
/* ndo_start_xmit: prepend the mlxsw Tx header and hand the skb to the
 * mlxsw core for transmission. Per-CPU byte/packet/drop counters are
 * updated according to the outcome.
 */
static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
                                      struct net_device *dev)
{
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
        const struct mlxsw_tx_info tx_info = {
                .local_port = mlxsw_sp_port->local_port,
                .is_emad = false,
        };
        u64 len;
        int err;

        /* Guarantee writable headroom for the Tx header; on failure the
         * packet is counted as dropped and consumed.
         */
        if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
                this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }

        memset(skb->cb, 0, sizeof(struct mlxsw_skb_cb));

        /* Busy check happens before the skb is modified, so returning
         * NETDEV_TX_BUSY lets the stack requeue the untouched skb.
         */
        if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
                return NETDEV_TX_BUSY;

        /* NOTE(review): no kfree on this path — presumably eth_skb_pad()
         * consumes the skb on failure; confirm against its definition.
         */
        if (eth_skb_pad(skb)) {
                this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
                return NETDEV_TX_OK;
        }

        mlxsw_sp_txhdr_construct(skb, &tx_info);
        /* TX header is consumed by HW on the way so we shouldn't count its
         * bytes as being sent.
         */
        len = skb->len - MLXSW_TXHDR_LEN;

        /* Due to a race we might fail here because of a full queue. In that
         * unlikely case we simply drop the packet.
         */
        err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);

        if (!err) {
                pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
                u64_stats_update_begin(&pcpu_stats->syncp);
                pcpu_stats->tx_packets++;
                pcpu_stats->tx_bytes += len;
                u64_stats_update_end(&pcpu_stats->syncp);
        } else {
                this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
                dev_kfree_skb_any(skb);
        }
        return NETDEV_TX_OK;
}
617
/* ndo_set_rx_mode callback. Intentionally a no-op: no host-side Rx
 * filtering is configured here.
 */
static void mlxsw_sp_set_rx_mode(struct net_device *dev)
{
}
621
622 static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
623 {
624         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
625         struct sockaddr *addr = p;
626         int err;
627
628         if (!is_valid_ether_addr(addr->sa_data))
629                 return -EADDRNOTAVAIL;
630
631         err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
632         if (err)
633                 return err;
634         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
635         return 0;
636 }
637
638 static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
639 {
640         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
641         struct mlxsw_sp_hdroom orig_hdroom;
642         struct mlxsw_sp_hdroom hdroom;
643         int err;
644
645         orig_hdroom = *mlxsw_sp_port->hdroom;
646
647         hdroom = orig_hdroom;
648         hdroom.mtu = mtu;
649         mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);
650
651         err = mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
652         if (err) {
653                 netdev_err(dev, "Failed to configure port's headroom\n");
654                 return err;
655         }
656
657         err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
658         if (err)
659                 goto err_port_mtu_set;
660         dev->mtu = mtu;
661         return 0;
662
663 err_port_mtu_set:
664         mlxsw_sp_hdroom_configure(mlxsw_sp_port, &orig_hdroom);
665         return err;
666 }
667
/* Sum the port's per-CPU software counters into @stats. The 64-bit
 * counters are read under the u64_stats seqcount so torn reads on
 * 32-bit hosts are retried; tx_dropped is a plain u32 summed outside
 * that protection. Always returns 0.
 */
static int
mlxsw_sp_port_get_sw_stats64(const struct net_device *dev,
                             struct rtnl_link_stats64 *stats)
{
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
        struct mlxsw_sp_port_pcpu_stats *p;
        u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
        u32 tx_dropped = 0;
        unsigned int start;
        int i;

        for_each_possible_cpu(i) {
                p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
                do {
                        start = u64_stats_fetch_begin_irq(&p->syncp);
                        rx_packets      = p->rx_packets;
                        rx_bytes        = p->rx_bytes;
                        tx_packets      = p->tx_packets;
                        tx_bytes        = p->tx_bytes;
                } while (u64_stats_fetch_retry_irq(&p->syncp, start));

                stats->rx_packets       += rx_packets;
                stats->rx_bytes         += rx_bytes;
                stats->tx_packets       += tx_packets;
                stats->tx_bytes         += tx_bytes;
                /* tx_dropped is u32, updated without syncp protection. */
                tx_dropped      += p->tx_dropped;
        }
        stats->tx_dropped       = tx_dropped;
        return 0;
}
699
700 static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev, int attr_id)
701 {
702         switch (attr_id) {
703         case IFLA_OFFLOAD_XSTATS_CPU_HIT:
704                 return true;
705         }
706
707         return false;
708 }
709
710 static int mlxsw_sp_port_get_offload_stats(int attr_id, const struct net_device *dev,
711                                            void *sp)
712 {
713         switch (attr_id) {
714         case IFLA_OFFLOAD_XSTATS_CPU_HIT:
715                 return mlxsw_sp_port_get_sw_stats64(dev, sp);
716         }
717
718         return -EINVAL;
719 }
720
721 int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp,
722                                 int prio, char *ppcnt_pl)
723 {
724         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
725         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
726
727         mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
728         return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
729 }
730
731 static int mlxsw_sp_port_get_hw_stats(struct net_device *dev,
732                                       struct rtnl_link_stats64 *stats)
733 {
734         char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
735         int err;
736
737         err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT,
738                                           0, ppcnt_pl);
739         if (err)
740                 goto out;
741
742         stats->tx_packets =
743                 mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl);
744         stats->rx_packets =
745                 mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl);
746         stats->tx_bytes =
747                 mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl);
748         stats->rx_bytes =
749                 mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl);
750         stats->multicast =
751                 mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl);
752
753         stats->rx_crc_errors =
754                 mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl);
755         stats->rx_frame_errors =
756                 mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl);
757
758         stats->rx_length_errors = (
759                 mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) +
760                 mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) +
761                 mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl));
762
763         stats->rx_errors = (stats->rx_crc_errors +
764                 stats->rx_frame_errors + stats->rx_length_errors);
765
766 out:
767         return err;
768 }
769
/* Collect extended per-port statistics (ECN marks, per-TC WRED drops,
 * backlog and tail drops, per-priority Tx counters) into @xstats.
 * Each PPCNT query failure is ignored individually — the corresponding
 * xstats fields are simply left as they were.
 */
static void
mlxsw_sp_port_get_hw_xstats(struct net_device *dev,
                            struct mlxsw_sp_port_xstats *xstats)
{
        char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
        int err, i;

        err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_EXT_CNT, 0,
                                          ppcnt_pl);
        if (!err)
                xstats->ecn = mlxsw_reg_ppcnt_ecn_marked_get(ppcnt_pl);

        for (i = 0; i < TC_MAX_QUEUE; i++) {
                err = mlxsw_sp_port_get_stats_raw(dev,
                                                  MLXSW_REG_PPCNT_TC_CONG_TC,
                                                  i, ppcnt_pl);
                if (!err)
                        xstats->wred_drop[i] =
                                mlxsw_reg_ppcnt_wred_discard_get(ppcnt_pl);

                err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_TC_CNT,
                                                  i, ppcnt_pl);
                if (err)
                        continue;

                xstats->backlog[i] =
                        mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl);
                xstats->tail_drop[i] =
                        mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get(ppcnt_pl);
        }

        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
                err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_PRIO_CNT,
                                                  i, ppcnt_pl);
                if (err)
                        continue;

                xstats->tx_packets[i] = mlxsw_reg_ppcnt_tx_frames_get(ppcnt_pl);
                xstats->tx_bytes[i] = mlxsw_reg_ppcnt_tx_octets_get(ppcnt_pl);
        }
}
811
/* Delayed-work handler: periodically refresh the cached HW stats served
 * by mlxsw_sp_port_get_stats64(). Skips the refresh while the carrier
 * is down and always reschedules itself after MLXSW_HW_STATS_UPDATE_TIME.
 */
static void update_stats_cache(struct work_struct *work)
{
        struct mlxsw_sp_port *mlxsw_sp_port =
                container_of(work, struct mlxsw_sp_port,
                             periodic_hw_stats.update_dw.work);

        if (!netif_carrier_ok(mlxsw_sp_port->dev))
                /* Note: mlxsw_sp_port_down_wipe_counters() clears the cache as
                 * necessary when port goes down.
                 */
                goto out;

        mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev,
                                   &mlxsw_sp_port->periodic_hw_stats.stats);
        mlxsw_sp_port_get_hw_xstats(mlxsw_sp_port->dev,
                                    &mlxsw_sp_port->periodic_hw_stats.xstats);

out:
        mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw,
                               MLXSW_HW_STATS_UPDATE_TIME);
}
833
834 /* Return the stats from a cache that is updated periodically,
835  * as this function might get called in an atomic context.
836  */
837 static void
838 mlxsw_sp_port_get_stats64(struct net_device *dev,
839                           struct rtnl_link_stats64 *stats)
840 {
841         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
842
843         memcpy(stats, &mlxsw_sp_port->periodic_hw_stats.stats, sizeof(*stats));
844 }
845
846 static int __mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
847                                     u16 vid_begin, u16 vid_end,
848                                     bool is_member, bool untagged)
849 {
850         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
851         char *spvm_pl;
852         int err;
853
854         spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
855         if (!spvm_pl)
856                 return -ENOMEM;
857
858         mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
859                             vid_end, is_member, untagged);
860         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
861         kfree(spvm_pl);
862         return err;
863 }
864
865 int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
866                            u16 vid_end, bool is_member, bool untagged)
867 {
868         u16 vid, vid_e;
869         int err;
870
871         for (vid = vid_begin; vid <= vid_end;
872              vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
873                 vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
874                             vid_end);
875
876                 err = __mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
877                                                is_member, untagged);
878                 if (err)
879                         return err;
880         }
881
882         return 0;
883 }
884
885 static void mlxsw_sp_port_vlan_flush(struct mlxsw_sp_port *mlxsw_sp_port,
886                                      bool flush_default)
887 {
888         struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, *tmp;
889
890         list_for_each_entry_safe(mlxsw_sp_port_vlan, tmp,
891                                  &mlxsw_sp_port->vlans_list, list) {
892                 if (!flush_default &&
893                     mlxsw_sp_port_vlan->vid == MLXSW_SP_DEFAULT_VID)
894                         continue;
895                 mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
896         }
897 }
898
899 static void
900 mlxsw_sp_port_vlan_cleanup(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
901 {
902         if (mlxsw_sp_port_vlan->bridge_port)
903                 mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
904         else if (mlxsw_sp_port_vlan->fid)
905                 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
906 }
907
908 struct mlxsw_sp_port_vlan *
909 mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
910 {
911         struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
912         bool untagged = vid == MLXSW_SP_DEFAULT_VID;
913         int err;
914
915         mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
916         if (mlxsw_sp_port_vlan)
917                 return ERR_PTR(-EEXIST);
918
919         err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, untagged);
920         if (err)
921                 return ERR_PTR(err);
922
923         mlxsw_sp_port_vlan = kzalloc(sizeof(*mlxsw_sp_port_vlan), GFP_KERNEL);
924         if (!mlxsw_sp_port_vlan) {
925                 err = -ENOMEM;
926                 goto err_port_vlan_alloc;
927         }
928
929         mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port;
930         mlxsw_sp_port_vlan->vid = vid;
931         list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list);
932
933         return mlxsw_sp_port_vlan;
934
935 err_port_vlan_alloc:
936         mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
937         return ERR_PTR(err);
938 }
939
940 void mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
941 {
942         struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
943         u16 vid = mlxsw_sp_port_vlan->vid;
944
945         mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port_vlan);
946         list_del(&mlxsw_sp_port_vlan->list);
947         kfree(mlxsw_sp_port_vlan);
948         mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
949 }
950
951 static int mlxsw_sp_port_add_vid(struct net_device *dev,
952                                  __be16 __always_unused proto, u16 vid)
953 {
954         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
955
956         /* VLAN 0 is added to HW filter when device goes up, but it is
957          * reserved in our case, so simply return.
958          */
959         if (!vid)
960                 return 0;
961
962         return PTR_ERR_OR_ZERO(mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid));
963 }
964
965 static int mlxsw_sp_port_kill_vid(struct net_device *dev,
966                                   __be16 __always_unused proto, u16 vid)
967 {
968         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
969         struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
970
971         /* VLAN 0 is removed from HW filter when device goes down, but
972          * it is reserved in our case, so simply return.
973          */
974         if (!vid)
975                 return 0;
976
977         mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
978         if (!mlxsw_sp_port_vlan)
979                 return 0;
980         mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
981
982         return 0;
983 }
984
985 static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
986                                    struct flow_block_offload *f)
987 {
988         switch (f->binder_type) {
989         case FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS:
990                 return mlxsw_sp_setup_tc_block_clsact(mlxsw_sp_port, f, true);
991         case FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS:
992                 return mlxsw_sp_setup_tc_block_clsact(mlxsw_sp_port, f, false);
993         case FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP:
994                 return mlxsw_sp_setup_tc_block_qevent_early_drop(mlxsw_sp_port, f);
995         default:
996                 return -EOPNOTSUPP;
997         }
998 }
999
/* .ndo_setup_tc callback: dispatch TC offload requests (flow blocks and
 * the supported qdisc kinds) to the corresponding handler.
 */
static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type,
                             void *type_data)
{
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

        switch (type) {
        case TC_SETUP_BLOCK:
                return mlxsw_sp_setup_tc_block(mlxsw_sp_port, type_data);
        case TC_SETUP_QDISC_RED:
                return mlxsw_sp_setup_tc_red(mlxsw_sp_port, type_data);
        case TC_SETUP_QDISC_PRIO:
                return mlxsw_sp_setup_tc_prio(mlxsw_sp_port, type_data);
        case TC_SETUP_QDISC_ETS:
                return mlxsw_sp_setup_tc_ets(mlxsw_sp_port, type_data);
        case TC_SETUP_QDISC_TBF:
                return mlxsw_sp_setup_tc_tbf(mlxsw_sp_port, type_data);
        case TC_SETUP_QDISC_FIFO:
                return mlxsw_sp_setup_tc_fifo(mlxsw_sp_port, type_data);
        default:
                return -EOPNOTSUPP;
        }
}
1022
1023 static int mlxsw_sp_feature_hw_tc(struct net_device *dev, bool enable)
1024 {
1025         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1026
1027         if (!enable) {
1028                 if (mlxsw_sp_flow_block_rule_count(mlxsw_sp_port->ing_flow_block) ||
1029                     mlxsw_sp_flow_block_rule_count(mlxsw_sp_port->eg_flow_block)) {
1030                         netdev_err(dev, "Active offloaded tc filters, can't turn hw_tc_offload off\n");
1031                         return -EINVAL;
1032                 }
1033                 mlxsw_sp_flow_block_disable_inc(mlxsw_sp_port->ing_flow_block);
1034                 mlxsw_sp_flow_block_disable_inc(mlxsw_sp_port->eg_flow_block);
1035         } else {
1036                 mlxsw_sp_flow_block_disable_dec(mlxsw_sp_port->ing_flow_block);
1037                 mlxsw_sp_flow_block_disable_dec(mlxsw_sp_port->eg_flow_block);
1038         }
1039         return 0;
1040 }
1041
/* Toggle PHY loopback on the port via the PPLR register. A running port
 * is administratively taken down around the register write and brought
 * back up afterwards.
 */
static int mlxsw_sp_feature_loopback(struct net_device *dev, bool enable)
{
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
        char pplr_pl[MLXSW_REG_PPLR_LEN];
        int err;

        if (netif_running(dev))
                mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);

        mlxsw_reg_pplr_pack(pplr_pl, mlxsw_sp_port->local_port, enable);
        err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pplr),
                              pplr_pl);

        /* Re-enable even if the write failed, so the port is not left down. */
        if (netif_running(dev))
                mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);

        return err;
}
1060
1061 typedef int (*mlxsw_sp_feature_handler)(struct net_device *dev, bool enable);
1062
1063 static int mlxsw_sp_handle_feature(struct net_device *dev,
1064                                    netdev_features_t wanted_features,
1065                                    netdev_features_t feature,
1066                                    mlxsw_sp_feature_handler feature_handler)
1067 {
1068         netdev_features_t changes = wanted_features ^ dev->features;
1069         bool enable = !!(wanted_features & feature);
1070         int err;
1071
1072         if (!(changes & feature))
1073                 return 0;
1074
1075         err = feature_handler(dev, enable);
1076         if (err) {
1077                 netdev_err(dev, "%s feature %pNF failed, err %d\n",
1078                            enable ? "Enable" : "Disable", &feature, err);
1079                 return err;
1080         }
1081
1082         if (enable)
1083                 dev->features |= feature;
1084         else
1085                 dev->features &= ~feature;
1086
1087         return 0;
1088 }
1089 static int mlxsw_sp_set_features(struct net_device *dev,
1090                                  netdev_features_t features)
1091 {
1092         netdev_features_t oper_features = dev->features;
1093         int err = 0;
1094
1095         err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_HW_TC,
1096                                        mlxsw_sp_feature_hw_tc);
1097         err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_LOOPBACK,
1098                                        mlxsw_sp_feature_loopback);
1099
1100         if (err) {
1101                 dev->features = oper_features;
1102                 return -EINVAL;
1103         }
1104
1105         return 0;
1106 }
1107
1108 static struct devlink_port *
1109 mlxsw_sp_port_get_devlink_port(struct net_device *dev)
1110 {
1111         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1112         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1113
1114         return mlxsw_core_port_devlink_port_get(mlxsw_sp->core,
1115                                                 mlxsw_sp_port->local_port);
1116 }
1117
1118 static int mlxsw_sp_port_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
1119                                       struct ifreq *ifr)
1120 {
1121         struct hwtstamp_config config;
1122         int err;
1123
1124         if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
1125                 return -EFAULT;
1126
1127         err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port,
1128                                                              &config);
1129         if (err)
1130                 return err;
1131
1132         if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
1133                 return -EFAULT;
1134
1135         return 0;
1136 }
1137
1138 static int mlxsw_sp_port_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
1139                                       struct ifreq *ifr)
1140 {
1141         struct hwtstamp_config config;
1142         int err;
1143
1144         err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_get(mlxsw_sp_port,
1145                                                              &config);
1146         if (err)
1147                 return err;
1148
1149         if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
1150                 return -EFAULT;
1151
1152         return 0;
1153 }
1154
/* Reset the port's HW timestamping to the zeroed (all-off) config. */
static inline void mlxsw_sp_port_ptp_clear(struct mlxsw_sp_port *mlxsw_sp_port)
{
        struct hwtstamp_config config = {0};

        mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port, &config);
}
1161
1162 static int
1163 mlxsw_sp_port_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1164 {
1165         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1166
1167         switch (cmd) {
1168         case SIOCSHWTSTAMP:
1169                 return mlxsw_sp_port_hwtstamp_set(mlxsw_sp_port, ifr);
1170         case SIOCGHWTSTAMP:
1171                 return mlxsw_sp_port_hwtstamp_get(mlxsw_sp_port, ifr);
1172         default:
1173                 return -EOPNOTSUPP;
1174         }
1175 }
1176
/* Netdev callbacks for Spectrum front-panel ports. */
static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
        .ndo_open               = mlxsw_sp_port_open,
        .ndo_stop               = mlxsw_sp_port_stop,
        .ndo_start_xmit         = mlxsw_sp_port_xmit,
        .ndo_setup_tc           = mlxsw_sp_setup_tc,
        .ndo_set_rx_mode        = mlxsw_sp_set_rx_mode,
        .ndo_set_mac_address    = mlxsw_sp_port_set_mac_address,
        .ndo_change_mtu         = mlxsw_sp_port_change_mtu,
        .ndo_get_stats64        = mlxsw_sp_port_get_stats64,
        .ndo_has_offload_stats  = mlxsw_sp_port_has_offload_stats,
        .ndo_get_offload_stats  = mlxsw_sp_port_get_offload_stats,
        .ndo_vlan_rx_add_vid    = mlxsw_sp_port_add_vid,
        .ndo_vlan_rx_kill_vid   = mlxsw_sp_port_kill_vid,
        .ndo_set_features       = mlxsw_sp_set_features,
        .ndo_get_devlink_port   = mlxsw_sp_port_get_devlink_port,
        .ndo_do_ioctl           = mlxsw_sp_port_ioctl,
};
1194
/* Query the port's Ethernet protocol capabilities (PTYS), mask them down
 * to the protocols the driver supports, and write the masked set back as
 * the administratively enabled (advertised) protocols.
 */
static int
mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        u32 eth_proto_cap, eth_proto_admin, eth_proto_oper;
        const struct mlxsw_sp_port_type_speed_ops *ops;
        char ptys_pl[MLXSW_REG_PTYS_LEN];
        u32 eth_proto_cap_masked;
        int err;

        ops = mlxsw_sp->port_type_speed_ops;

        /* Set advertised speeds to speeds supported by both the driver
         * and the device.
         */
        ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
                               0, false);
        err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
        if (err)
                return err;

        ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, &eth_proto_cap,
                                 &eth_proto_admin, &eth_proto_oper);
        /* Mask the device capabilities down to driver-supported protocols. */
        eth_proto_cap_masked = ops->ptys_proto_cap_masked_get(eth_proto_cap);
        ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
                               eth_proto_cap_masked,
                               mlxsw_sp_port->link.autoneg);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
}
1224
1225 int mlxsw_sp_port_speed_get(struct mlxsw_sp_port *mlxsw_sp_port, u32 *speed)
1226 {
1227         const struct mlxsw_sp_port_type_speed_ops *port_type_speed_ops;
1228         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1229         char ptys_pl[MLXSW_REG_PTYS_LEN];
1230         u32 eth_proto_oper;
1231         int err;
1232
1233         port_type_speed_ops = mlxsw_sp->port_type_speed_ops;
1234         port_type_speed_ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl,
1235                                                mlxsw_sp_port->local_port, 0,
1236                                                false);
1237         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
1238         if (err)
1239                 return err;
1240         port_type_speed_ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, NULL, NULL,
1241                                                  &eth_proto_oper);
1242         *speed = port_type_speed_ops->from_ptys_speed(mlxsw_sp, eth_proto_oper);
1243         return 0;
1244 }
1245
1246 int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
1247                           enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
1248                           bool dwrr, u8 dwrr_weight)
1249 {
1250         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1251         char qeec_pl[MLXSW_REG_QEEC_LEN];
1252
1253         mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
1254                             next_index);
1255         mlxsw_reg_qeec_de_set(qeec_pl, true);
1256         mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
1257         mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
1258         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
1259 }
1260
1261 int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
1262                                   enum mlxsw_reg_qeec_hr hr, u8 index,
1263                                   u8 next_index, u32 maxrate, u8 burst_size)
1264 {
1265         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1266         char qeec_pl[MLXSW_REG_QEEC_LEN];
1267
1268         mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
1269                             next_index);
1270         mlxsw_reg_qeec_mase_set(qeec_pl, true);
1271         mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
1272         mlxsw_reg_qeec_max_shaper_bs_set(qeec_pl, burst_size);
1273         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
1274 }
1275
1276 static int mlxsw_sp_port_min_bw_set(struct mlxsw_sp_port *mlxsw_sp_port,
1277                                     enum mlxsw_reg_qeec_hr hr, u8 index,
1278                                     u8 next_index, u32 minrate)
1279 {
1280         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1281         char qeec_pl[MLXSW_REG_QEEC_LEN];
1282
1283         mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
1284                             next_index);
1285         mlxsw_reg_qeec_mise_set(qeec_pl, true);
1286         mlxsw_reg_qeec_min_shaper_rate_set(qeec_pl, minrate);
1287
1288         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
1289 }
1290
1291 int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
1292                               u8 switch_prio, u8 tclass)
1293 {
1294         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1295         char qtct_pl[MLXSW_REG_QTCT_LEN];
1296
1297         mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
1298                             tclass);
1299         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
1300 }
1301
/* Build the port's initial QoS scheduling hierarchy and reset shapers:
 * each TC is linked to a subgroup, all subgroups to one group; max
 * shapers are disabled at every level, min shapers are set for the
 * multicast TCs (indices 8..15) and all priorities map to TC 0.
 */
static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
        int err, i;

        /* Setup the elements hierarchy, so that each TC is linked to
         * one subgroup, which are all member in the same group.
         */
        err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
                                    MLXSW_REG_QEEC_HR_GROUP, 0, 0, false, 0);
        if (err)
                return err;
        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
                err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
                                            MLXSW_REG_QEEC_HR_SUBGROUP, i,
                                            0, false, 0);
                if (err)
                        return err;
        }
        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
                /* TC i and its multicast counterpart i + 8 share subgroup i. */
                err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
                                            MLXSW_REG_QEEC_HR_TC, i, i,
                                            false, 0);
                if (err)
                        return err;

                err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
                                            MLXSW_REG_QEEC_HR_TC,
                                            i + 8, i,
                                            true, 100);
                if (err)
                        return err;
        }

        /* Make sure the max shaper is disabled in all hierarchies that support
         * it. Note that this disables ptps (PTP shaper), but that is intended
         * for the initial configuration.
         */
        err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
                                            MLXSW_REG_QEEC_HR_PORT, 0, 0,
                                            MLXSW_REG_QEEC_MAS_DIS, 0);
        if (err)
                return err;
        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
                err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
                                                    MLXSW_REG_QEEC_HR_SUBGROUP,
                                                    i, 0,
                                                    MLXSW_REG_QEEC_MAS_DIS, 0);
                if (err)
                        return err;
        }
        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
                err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
                                                    MLXSW_REG_QEEC_HR_TC,
                                                    i, i,
                                                    MLXSW_REG_QEEC_MAS_DIS, 0);
                if (err)
                        return err;

                err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
                                                    MLXSW_REG_QEEC_HR_TC,
                                                    i + 8, i,
                                                    MLXSW_REG_QEEC_MAS_DIS, 0);
                if (err)
                        return err;
        }

        /* Configure the min shaper for multicast TCs. */
        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
                err = mlxsw_sp_port_min_bw_set(mlxsw_sp_port,
                                               MLXSW_REG_QEEC_HR_TC,
                                               i + 8, i,
                                               MLXSW_REG_QEEC_MIS_MIN);
                if (err)
                        return err;
        }

        /* Map all priorities to traffic class 0. */
        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
                err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
                if (err)
                        return err;
        }

        return 0;
}
1387
1388 static int mlxsw_sp_port_tc_mc_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
1389                                         bool enable)
1390 {
1391         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1392         char qtctm_pl[MLXSW_REG_QTCTM_LEN];
1393
1394         mlxsw_reg_qtctm_pack(qtctm_pl, mlxsw_sp_port->local_port, enable);
1395         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtctm), qtctm_pl);
1396 }
1397
1398 static int mlxsw_sp_port_overheat_init_val_set(struct mlxsw_sp_port *mlxsw_sp_port)
1399 {
1400         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1401         u8 module = mlxsw_sp_port->mapping.module;
1402         u64 overheat_counter;
1403         int err;
1404
1405         err = mlxsw_env_module_overheat_counter_get(mlxsw_sp->core, module,
1406                                                     &overheat_counter);
1407         if (err)
1408                 return err;
1409
1410         mlxsw_sp_port->module_overheat_initial_val = overheat_counter;
1411         return 0;
1412 }
1413
1414 int
1415 mlxsw_sp_port_vlan_classification_set(struct mlxsw_sp_port *mlxsw_sp_port,
1416                                       bool is_8021ad_tagged,
1417                                       bool is_8021q_tagged)
1418 {
1419         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1420         char spvc_pl[MLXSW_REG_SPVC_LEN];
1421
1422         mlxsw_reg_spvc_pack(spvc_pl, mlxsw_sp_port->local_port,
1423                             is_8021ad_tagged, is_8021q_tagged);
1424         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvc), spvc_pl);
1425 }
1426
1427 static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
1428                                 u8 split_base_local_port,
1429                                 struct mlxsw_sp_port_mapping *port_mapping)
1430 {
1431         struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1432         bool split = !!split_base_local_port;
1433         struct mlxsw_sp_port *mlxsw_sp_port;
1434         u32 lanes = port_mapping->width;
1435         struct net_device *dev;
1436         bool splittable;
1437         int err;
1438
1439         splittable = lanes > 1 && !split;
1440         err = mlxsw_core_port_init(mlxsw_sp->core, local_port,
1441                                    port_mapping->module + 1, split,
1442                                    port_mapping->lane / lanes,
1443                                    splittable, lanes,
1444                                    mlxsw_sp->base_mac,
1445                                    sizeof(mlxsw_sp->base_mac));
1446         if (err) {
1447                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
1448                         local_port);
1449                 return err;
1450         }
1451
1452         dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
1453         if (!dev) {
1454                 err = -ENOMEM;
1455                 goto err_alloc_etherdev;
1456         }
1457         SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev);
1458         dev_net_set(dev, mlxsw_sp_net(mlxsw_sp));
1459         mlxsw_sp_port = netdev_priv(dev);
1460         mlxsw_sp_port->dev = dev;
1461         mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
1462         mlxsw_sp_port->local_port = local_port;
1463         mlxsw_sp_port->pvid = MLXSW_SP_DEFAULT_VID;
1464         mlxsw_sp_port->split = split;
1465         mlxsw_sp_port->split_base_local_port = split_base_local_port;
1466         mlxsw_sp_port->mapping = *port_mapping;
1467         mlxsw_sp_port->link.autoneg = 1;
1468         INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list);
1469
1470         mlxsw_sp_port->pcpu_stats =
1471                 netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
1472         if (!mlxsw_sp_port->pcpu_stats) {
1473                 err = -ENOMEM;
1474                 goto err_alloc_stats;
1475         }
1476
1477         INIT_DELAYED_WORK(&mlxsw_sp_port->periodic_hw_stats.update_dw,
1478                           &update_stats_cache);
1479
1480         dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
1481         dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;
1482
1483         err = mlxsw_sp_port_module_map(mlxsw_sp_port);
1484         if (err) {
1485                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to map module\n",
1486                         mlxsw_sp_port->local_port);
1487                 goto err_port_module_map;
1488         }
1489
1490         err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
1491         if (err) {
1492                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
1493                         mlxsw_sp_port->local_port);
1494                 goto err_port_swid_set;
1495         }
1496
1497         err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
1498         if (err) {
1499                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
1500                         mlxsw_sp_port->local_port);
1501                 goto err_dev_addr_init;
1502         }
1503
1504         netif_carrier_off(dev);
1505
1506         dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
1507                          NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
1508         dev->hw_features |= NETIF_F_HW_TC | NETIF_F_LOOPBACK;
1509
1510         dev->min_mtu = 0;
1511         dev->max_mtu = ETH_MAX_MTU;
1512
1513         /* Each packet needs to have a Tx header (metadata) on top all other
1514          * headers.
1515          */
1516         dev->needed_headroom = MLXSW_TXHDR_LEN;
1517
1518         err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
1519         if (err) {
1520                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
1521                         mlxsw_sp_port->local_port);
1522                 goto err_port_system_port_mapping_set;
1523         }
1524
1525         err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port);
1526         if (err) {
1527                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
1528                         mlxsw_sp_port->local_port);
1529                 goto err_port_speed_by_width_set;
1530         }
1531
1532         err = mlxsw_sp->port_type_speed_ops->ptys_max_speed(mlxsw_sp_port,
1533                                                             &mlxsw_sp_port->max_speed);
1534         if (err) {
1535                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get maximum speed\n",
1536                         mlxsw_sp_port->local_port);
1537                 goto err_max_speed_get;
1538         }
1539
1540         err = mlxsw_sp_port_max_mtu_get(mlxsw_sp_port, &mlxsw_sp_port->max_mtu);
1541         if (err) {
1542                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get maximum MTU\n",
1543                         mlxsw_sp_port->local_port);
1544                 goto err_port_max_mtu_get;
1545         }
1546
1547         err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
1548         if (err) {
1549                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
1550                         mlxsw_sp_port->local_port);
1551                 goto err_port_mtu_set;
1552         }
1553
1554         err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
1555         if (err)
1556                 goto err_port_admin_status_set;
1557
1558         err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
1559         if (err) {
1560                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
1561                         mlxsw_sp_port->local_port);
1562                 goto err_port_buffers_init;
1563         }
1564
1565         err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
1566         if (err) {
1567                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
1568                         mlxsw_sp_port->local_port);
1569                 goto err_port_ets_init;
1570         }
1571
1572         err = mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, true);
1573         if (err) {
1574                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC MC mode\n",
1575                         mlxsw_sp_port->local_port);
1576                 goto err_port_tc_mc_mode;
1577         }
1578
1579         /* ETS and buffers must be initialized before DCB. */
1580         err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
1581         if (err) {
1582                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
1583                         mlxsw_sp_port->local_port);
1584                 goto err_port_dcb_init;
1585         }
1586
1587         err = mlxsw_sp_port_fids_init(mlxsw_sp_port);
1588         if (err) {
1589                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize FIDs\n",
1590                         mlxsw_sp_port->local_port);
1591                 goto err_port_fids_init;
1592         }
1593
1594         err = mlxsw_sp_tc_qdisc_init(mlxsw_sp_port);
1595         if (err) {
1596                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC qdiscs\n",
1597                         mlxsw_sp_port->local_port);
1598                 goto err_port_qdiscs_init;
1599         }
1600
1601         err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 0, VLAN_N_VID - 1, false,
1602                                      false);
1603         if (err) {
1604                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to clear VLAN filter\n",
1605                         mlxsw_sp_port->local_port);
1606                 goto err_port_vlan_clear;
1607         }
1608
1609         err = mlxsw_sp_port_nve_init(mlxsw_sp_port);
1610         if (err) {
1611                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize NVE\n",
1612                         mlxsw_sp_port->local_port);
1613                 goto err_port_nve_init;
1614         }
1615
1616         err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID,
1617                                      ETH_P_8021Q);
1618         if (err) {
1619                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set PVID\n",
1620                         mlxsw_sp_port->local_port);
1621                 goto err_port_pvid_set;
1622         }
1623
1624         mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_create(mlxsw_sp_port,
1625                                                        MLXSW_SP_DEFAULT_VID);
1626         if (IS_ERR(mlxsw_sp_port_vlan)) {
1627                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n",
1628                         mlxsw_sp_port->local_port);
1629                 err = PTR_ERR(mlxsw_sp_port_vlan);
1630                 goto err_port_vlan_create;
1631         }
1632         mlxsw_sp_port->default_vlan = mlxsw_sp_port_vlan;
1633
1634         /* Set SPVC.et0=true and SPVC.et1=false to make the local port to treat
1635          * only packets with 802.1q header as tagged packets.
1636          */
1637         err = mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, false, true);
1638         if (err) {
1639                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set default VLAN classification\n",
1640                         local_port);
1641                 goto err_port_vlan_classification_set;
1642         }
1643
1644         INIT_DELAYED_WORK(&mlxsw_sp_port->ptp.shaper_dw,
1645                           mlxsw_sp->ptp_ops->shaper_work);
1646
1647         mlxsw_sp->ports[local_port] = mlxsw_sp_port;
1648
1649         err = mlxsw_sp_port_overheat_init_val_set(mlxsw_sp_port);
1650         if (err) {
1651                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set overheat initial value\n",
1652                         mlxsw_sp_port->local_port);
1653                 goto err_port_overheat_init_val_set;
1654         }
1655
1656         err = register_netdev(dev);
1657         if (err) {
1658                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
1659                         mlxsw_sp_port->local_port);
1660                 goto err_register_netdev;
1661         }
1662
1663         mlxsw_core_port_eth_set(mlxsw_sp->core, mlxsw_sp_port->local_port,
1664                                 mlxsw_sp_port, dev);
1665         mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw, 0);
1666         return 0;
1667
1668 err_register_netdev:
1669 err_port_overheat_init_val_set:
1670         mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, true, true);
1671 err_port_vlan_classification_set:
1672         mlxsw_sp->ports[local_port] = NULL;
1673         mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
1674 err_port_vlan_create:
1675 err_port_pvid_set:
1676         mlxsw_sp_port_nve_fini(mlxsw_sp_port);
1677 err_port_nve_init:
1678 err_port_vlan_clear:
1679         mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
1680 err_port_qdiscs_init:
1681         mlxsw_sp_port_fids_fini(mlxsw_sp_port);
1682 err_port_fids_init:
1683         mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
1684 err_port_dcb_init:
1685         mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
1686 err_port_tc_mc_mode:
1687 err_port_ets_init:
1688         mlxsw_sp_port_buffers_fini(mlxsw_sp_port);
1689 err_port_buffers_init:
1690 err_port_admin_status_set:
1691 err_port_mtu_set:
1692 err_port_max_mtu_get:
1693 err_max_speed_get:
1694 err_port_speed_by_width_set:
1695 err_port_system_port_mapping_set:
1696 err_dev_addr_init:
1697         mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
1698 err_port_swid_set:
1699         mlxsw_sp_port_module_unmap(mlxsw_sp_port);
1700 err_port_module_map:
1701         free_percpu(mlxsw_sp_port->pcpu_stats);
1702 err_alloc_stats:
1703         free_netdev(dev);
1704 err_alloc_etherdev:
1705         mlxsw_core_port_fini(mlxsw_sp->core, local_port);
1706         return err;
1707 }
1708
/* Tear down a front-panel port. The sequence mirrors mlxsw_sp_port_create()
 * in reverse and must not be reordered.
 */
static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];

	/* Stop async work before dismantling anything it might touch. */
	cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw);
	cancel_delayed_work_sync(&mlxsw_sp_port->ptp.shaper_dw);
	mlxsw_sp_port_ptp_clear(mlxsw_sp_port);
	mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp);
	unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
	/* Restore default VLAN classification (et0=et1=true) set during
	 * port creation's error path as well.
	 */
	mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, true, true);
	/* Clear the lookup entry so trap/event handlers no longer find
	 * this port while it is being destroyed.
	 */
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_sp_port_vlan_flush(mlxsw_sp_port, true);
	mlxsw_sp_port_nve_fini(mlxsw_sp_port);
	mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
	mlxsw_sp_port_fids_fini(mlxsw_sp_port);
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
	mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
	mlxsw_sp_port_buffers_fini(mlxsw_sp_port);
	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
	mlxsw_sp_port_module_unmap(mlxsw_sp_port);
	free_percpu(mlxsw_sp_port->pcpu_stats);
	/* All VLANs should have been flushed above. */
	WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list));
	free_netdev(mlxsw_sp_port->dev);
	mlxsw_core_port_fini(mlxsw_sp->core, local_port);
}
1734
1735 static int mlxsw_sp_cpu_port_create(struct mlxsw_sp *mlxsw_sp)
1736 {
1737         struct mlxsw_sp_port *mlxsw_sp_port;
1738         int err;
1739
1740         mlxsw_sp_port = kzalloc(sizeof(*mlxsw_sp_port), GFP_KERNEL);
1741         if (!mlxsw_sp_port)
1742                 return -ENOMEM;
1743
1744         mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
1745         mlxsw_sp_port->local_port = MLXSW_PORT_CPU_PORT;
1746
1747         err = mlxsw_core_cpu_port_init(mlxsw_sp->core,
1748                                        mlxsw_sp_port,
1749                                        mlxsw_sp->base_mac,
1750                                        sizeof(mlxsw_sp->base_mac));
1751         if (err) {
1752                 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize core CPU port\n");
1753                 goto err_core_cpu_port_init;
1754         }
1755
1756         mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] = mlxsw_sp_port;
1757         return 0;
1758
1759 err_core_cpu_port_init:
1760         kfree(mlxsw_sp_port);
1761         return err;
1762 }
1763
1764 static void mlxsw_sp_cpu_port_remove(struct mlxsw_sp *mlxsw_sp)
1765 {
1766         struct mlxsw_sp_port *mlxsw_sp_port =
1767                                 mlxsw_sp->ports[MLXSW_PORT_CPU_PORT];
1768
1769         mlxsw_core_cpu_port_fini(mlxsw_sp->core);
1770         mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] = NULL;
1771         kfree(mlxsw_sp_port);
1772 }
1773
1774 static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u8 local_port)
1775 {
1776         return mlxsw_sp->ports[local_port] != NULL;
1777 }
1778
1779 static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
1780 {
1781         int i;
1782
1783         for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++)
1784                 if (mlxsw_sp_port_created(mlxsw_sp, i))
1785                         mlxsw_sp_port_remove(mlxsw_sp, i);
1786         mlxsw_sp_cpu_port_remove(mlxsw_sp);
1787         kfree(mlxsw_sp->ports);
1788         mlxsw_sp->ports = NULL;
1789 }
1790
1791 static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
1792 {
1793         unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
1794         struct mlxsw_sp_port_mapping *port_mapping;
1795         size_t alloc_size;
1796         int i;
1797         int err;
1798
1799         alloc_size = sizeof(struct mlxsw_sp_port *) * max_ports;
1800         mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
1801         if (!mlxsw_sp->ports)
1802                 return -ENOMEM;
1803
1804         err = mlxsw_sp_cpu_port_create(mlxsw_sp);
1805         if (err)
1806                 goto err_cpu_port_create;
1807
1808         for (i = 1; i < max_ports; i++) {
1809                 port_mapping = mlxsw_sp->port_mapping[i];
1810                 if (!port_mapping)
1811                         continue;
1812                 err = mlxsw_sp_port_create(mlxsw_sp, i, 0, port_mapping);
1813                 if (err)
1814                         goto err_port_create;
1815         }
1816         return 0;
1817
1818 err_port_create:
1819         for (i--; i >= 1; i--)
1820                 if (mlxsw_sp_port_created(mlxsw_sp, i))
1821                         mlxsw_sp_port_remove(mlxsw_sp, i);
1822         mlxsw_sp_cpu_port_remove(mlxsw_sp);
1823 err_cpu_port_create:
1824         kfree(mlxsw_sp->ports);
1825         mlxsw_sp->ports = NULL;
1826         return err;
1827 }
1828
/* Query the module mapping of every possible local port and cache a
 * heap copy per port in mlxsw_sp->port_mapping. Entries stay NULL for
 * local ports without a plugged/mapped module (width == 0). Index 0
 * (the CPU port) is never populated. Returns 0 or a negative errno;
 * on failure all copies made so far are freed.
 */
static int mlxsw_sp_port_module_info_init(struct mlxsw_sp *mlxsw_sp)
{
	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
	struct mlxsw_sp_port_mapping port_mapping;
	int i;
	int err;

	mlxsw_sp->port_mapping = kcalloc(max_ports,
					 sizeof(struct mlxsw_sp_port_mapping *),
					 GFP_KERNEL);
	if (!mlxsw_sp->port_mapping)
		return -ENOMEM;

	for (i = 1; i < max_ports; i++) {
		err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &port_mapping);
		if (err)
			goto err_port_module_info_get;
		/* Zero width means no module behind this local port. */
		if (!port_mapping.width)
			continue;

		mlxsw_sp->port_mapping[i] = kmemdup(&port_mapping,
						    sizeof(port_mapping),
						    GFP_KERNEL);
		if (!mlxsw_sp->port_mapping[i]) {
			err = -ENOMEM;
			goto err_port_module_info_dup;
		}
	}
	return 0;

err_port_module_info_get:
err_port_module_info_dup:
	/* Entry i was not (successfully) allocated; free 1..i-1 only.
	 * kfree(NULL) is a no-op, so sparse entries are fine.
	 */
	for (i--; i >= 1; i--)
		kfree(mlxsw_sp->port_mapping[i]);
	kfree(mlxsw_sp->port_mapping);
	return err;
}
1866
1867 static void mlxsw_sp_port_module_info_fini(struct mlxsw_sp *mlxsw_sp)
1868 {
1869         int i;
1870
1871         for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++)
1872                 kfree(mlxsw_sp->port_mapping[i]);
1873         kfree(mlxsw_sp->port_mapping);
1874 }
1875
1876 static u8 mlxsw_sp_cluster_base_port_get(u8 local_port, unsigned int max_width)
1877 {
1878         u8 offset = (local_port - 1) % max_width;
1879
1880         return local_port - offset;
1881 }
1882
/* Instantiate @count split ports starting at @base_port, @offset local
 * ports apart. Each split port gets an equal share of the parent's
 * width and consecutive lanes. On failure, ports already created here
 * are removed. Returns 0 or a negative errno.
 */
static int
mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
			   struct mlxsw_sp_port_mapping *port_mapping,
			   unsigned int count, u8 offset)
{
	struct mlxsw_sp_port_mapping split_port_mapping;
	int err, i;

	split_port_mapping = *port_mapping;
	split_port_mapping.width /= count;
	for (i = 0; i < count; i++) {
		err = mlxsw_sp_port_create(mlxsw_sp, base_port + i * offset,
					   base_port, &split_port_mapping);
		if (err)
			goto err_port_create;
		/* Advance to the lane range of the next split port. */
		split_port_mapping.lane += split_port_mapping.width;
	}

	return 0;

err_port_create:
	/* Unwind the ports created in earlier iterations. */
	for (i--; i >= 0; i--)
		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset))
			mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset);
	return err;
}
1909
1910 static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
1911                                          u8 base_port,
1912                                          unsigned int count, u8 offset)
1913 {
1914         struct mlxsw_sp_port_mapping *port_mapping;
1915         int i;
1916
1917         /* Go over original unsplit ports in the gap and recreate them. */
1918         for (i = 0; i < count * offset; i++) {
1919                 port_mapping = mlxsw_sp->port_mapping[base_port + i];
1920                 if (!port_mapping)
1921                         continue;
1922                 mlxsw_sp_port_create(mlxsw_sp, base_port + i, 0, port_mapping);
1923         }
1924 }
1925
1926 static int mlxsw_sp_local_ports_offset(struct mlxsw_core *mlxsw_core,
1927                                        unsigned int count,
1928                                        unsigned int max_width)
1929 {
1930         enum mlxsw_res_id local_ports_in_x_res_id;
1931         int split_width = max_width / count;
1932
1933         if (split_width == 1)
1934                 local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_1X;
1935         else if (split_width == 2)
1936                 local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_2X;
1937         else if (split_width == 4)
1938                 local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_4X;
1939         else
1940                 return -EINVAL;
1941
1942         if (!mlxsw_core_res_valid(mlxsw_core, local_ports_in_x_res_id))
1943                 return -EINVAL;
1944         return mlxsw_core_res_get(mlxsw_core, local_ports_in_x_res_id);
1945 }
1946
1947 static struct mlxsw_sp_port *
1948 mlxsw_sp_port_get_by_local_port(struct mlxsw_sp *mlxsw_sp, u8 local_port)
1949 {
1950         if (mlxsw_sp->ports && mlxsw_sp->ports[local_port])
1951                 return mlxsw_sp->ports[local_port];
1952         return NULL;
1953 }
1954
/* devlink port split handler: validate the request, remove the ports
 * occupying the target local-port range and create @count split ports
 * in their place. On failure the original unsplit ports are restored.
 * Returns 0 or a negative errno (with extack set on validation errors).
 */
static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
			       unsigned int count,
			       struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port_mapping port_mapping;
	struct mlxsw_sp_port *mlxsw_sp_port;
	int max_width;
	u8 base_port;
	int offset;
	int i;
	int err;

	mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port);
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		NL_SET_ERR_MSG_MOD(extack, "Port number does not exist");
		return -EINVAL;
	}

	max_width = mlxsw_core_module_max_width(mlxsw_core,
						mlxsw_sp_port->mapping.module);
	if (max_width < 0) {
		netdev_err(mlxsw_sp_port->dev, "Cannot get max width of port module\n");
		NL_SET_ERR_MSG_MOD(extack, "Cannot get max width of port module");
		return max_width;
	}

	/* Split port with non-max cannot be split. */
	if (mlxsw_sp_port->mapping.width != max_width) {
		netdev_err(mlxsw_sp_port->dev, "Port cannot be split\n");
		NL_SET_ERR_MSG_MOD(extack, "Port cannot be split");
		return -EINVAL;
	}

	offset = mlxsw_sp_local_ports_offset(mlxsw_core, count, max_width);
	if (offset < 0) {
		netdev_err(mlxsw_sp_port->dev, "Cannot obtain local port offset\n");
		NL_SET_ERR_MSG_MOD(extack, "Cannot obtain local port offset");
		return -EINVAL;
	}

	/* Only in case max split is being done, the local port and
	 * base port may differ.
	 */
	base_port = count == max_width ?
		    mlxsw_sp_cluster_base_port_get(local_port, max_width) :
		    local_port;

	/* The whole target range must be free of other ports. */
	for (i = 0; i < count * offset; i++) {
		/* Expect base port to exist and also the one in the middle in
		 * case of maximal split count.
		 */
		if (i == 0 || (count == max_width && i == count / 2))
			continue;

		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i)) {
			netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
			NL_SET_ERR_MSG_MOD(extack, "Invalid split configuration");
			return -EINVAL;
		}
	}

	/* Copy the mapping before removing the port that owns it. */
	port_mapping = mlxsw_sp_port->mapping;

	for (i = 0; i < count; i++)
		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset))
			mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset);

	err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, &port_mapping,
					 count, offset);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
		goto err_port_split_create;
	}

	return 0;

err_port_split_create:
	/* Restore the original unsplit ports in the affected range. */
	mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count, offset);
	return err;
}
2038
/* devlink port unsplit handler: remove the split ports belonging to the
 * cluster of @local_port and recreate the original unsplit ports from
 * the cached module mappings. Returns 0 or a negative errno.
 */
static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port,
				 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port *mlxsw_sp_port;
	unsigned int count;
	int max_width;
	u8 base_port;
	int offset;
	int i;

	mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port);
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		NL_SET_ERR_MSG_MOD(extack, "Port number does not exist");
		return -EINVAL;
	}

	if (!mlxsw_sp_port->split) {
		netdev_err(mlxsw_sp_port->dev, "Port was not split\n");
		NL_SET_ERR_MSG_MOD(extack, "Port was not split");
		return -EINVAL;
	}

	max_width = mlxsw_core_module_max_width(mlxsw_core,
						mlxsw_sp_port->mapping.module);
	if (max_width < 0) {
		netdev_err(mlxsw_sp_port->dev, "Cannot get max width of port module\n");
		NL_SET_ERR_MSG_MOD(extack, "Cannot get max width of port module");
		return max_width;
	}

	/* Recover the split count from the per-port width. */
	count = max_width / mlxsw_sp_port->mapping.width;

	offset = mlxsw_sp_local_ports_offset(mlxsw_core, count, max_width);
	/* The offset was valid when the port was split; failure here
	 * indicates an internal inconsistency, hence the WARN_ON.
	 */
	if (WARN_ON(offset < 0)) {
		netdev_err(mlxsw_sp_port->dev, "Cannot obtain local port offset\n");
		NL_SET_ERR_MSG_MOD(extack, "Cannot obtain local port offset");
		return -EINVAL;
	}

	base_port = mlxsw_sp_port->split_base_local_port;

	for (i = 0; i < count; i++)
		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset))
			mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset);

	mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count, offset);

	return 0;
}
2091
/* Reset the cached per-TC backlog counters; called when the link goes
 * down, since the hardware queues are drained at that point.
 */
static void
mlxsw_sp_port_down_wipe_counters(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int i;

	for (i = 0; i < TC_MAX_QUEUE; i++)
		mlxsw_sp_port->periodic_hw_stats.xstats.backlog[i] = 0;
}
2100
/* PUDE (Port Up/Down Event) handler: propagate the operational state
 * from the PUDE register payload to the netdev carrier state. @priv is
 * the mlxsw_sp instance registered with the listener.
 */
static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
				     char *pude_pl, void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port;
	enum mlxsw_reg_pude_oper_status status;
	u8 local_port;

	local_port = mlxsw_reg_pude_local_port_get(pude_pl);
	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	/* The port may have been removed after the event was generated. */
	if (!mlxsw_sp_port)
		return;

	status = mlxsw_reg_pude_oper_status_get(pude_pl);
	if (status == MLXSW_PORT_OPER_STATUS_UP) {
		netdev_info(mlxsw_sp_port->dev, "link up\n");
		netif_carrier_on(mlxsw_sp_port->dev);
		/* Re-evaluate the PTP shaper now that the link is up. */
		mlxsw_core_schedule_dw(&mlxsw_sp_port->ptp.shaper_dw, 0);
	} else {
		netdev_info(mlxsw_sp_port->dev, "link down\n");
		netif_carrier_off(mlxsw_sp_port->dev);
		mlxsw_sp_port_down_wipe_counters(mlxsw_sp_port);
	}
}
2125
/* Unpack each timestamp record from an MTPPTR event payload and hand it
 * to the SP1 PTP code for matching against queued packets. @ingress
 * distinguishes RX from TX FIFO events.
 */
static void mlxsw_sp1_ptp_fifo_event_func(struct mlxsw_sp *mlxsw_sp,
					  char *mtpptr_pl, bool ingress)
{
	u8 local_port;
	u8 num_rec;
	int i;

	local_port = mlxsw_reg_mtpptr_local_port_get(mtpptr_pl);
	num_rec = mlxsw_reg_mtpptr_num_rec_get(mtpptr_pl);
	for (i = 0; i < num_rec; i++) {
		u8 domain_number;
		u8 message_type;
		u16 sequence_id;
		u64 timestamp;

		mlxsw_reg_mtpptr_unpack(mtpptr_pl, i, &message_type,
					&domain_number, &sequence_id,
					&timestamp);
		mlxsw_sp1_ptp_got_timestamp(mlxsw_sp, ingress, local_port,
					    message_type, domain_number,
					    sequence_id, timestamp);
	}
}
2149
2150 static void mlxsw_sp1_ptp_ing_fifo_event_func(const struct mlxsw_reg_info *reg,
2151                                               char *mtpptr_pl, void *priv)
2152 {
2153         struct mlxsw_sp *mlxsw_sp = priv;
2154
2155         mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, true);
2156 }
2157
2158 static void mlxsw_sp1_ptp_egr_fifo_event_func(const struct mlxsw_reg_info *reg,
2159                                               char *mtpptr_pl, void *priv)
2160 {
2161         struct mlxsw_sp *mlxsw_sp = priv;
2162
2163         mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, false);
2164 }
2165
/* Common RX path for trapped packets: attribute the skb to the netdev
 * of the ingress port, account it in the per-CPU stats and inject it
 * into the network stack. @priv is the mlxsw_sp instance.
 */
void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb,
				       u8 local_port, void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;

	/* The port may have been removed between trap and delivery. */
	if (unlikely(!mlxsw_sp_port)) {
		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
				     local_port);
		return;
	}

	skb->dev = mlxsw_sp_port->dev;

	/* Update stats under the u64 syncp so 32-bit readers see a
	 * consistent snapshot.
	 */
	pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
	u64_stats_update_begin(&pcpu_stats->syncp);
	pcpu_stats->rx_packets++;
	pcpu_stats->rx_bytes += skb->len;
	u64_stats_update_end(&pcpu_stats->syncp);

	skb->protocol = eth_type_trans(skb, skb->dev);
	netif_receive_skb(skb);
}
2190
2191 static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port,
2192                                            void *priv)
2193 {
2194         skb->offload_fwd_mark = 1;
2195         return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
2196 }
2197
2198 static void mlxsw_sp_rx_listener_l3_mark_func(struct sk_buff *skb,
2199                                               u8 local_port, void *priv)
2200 {
2201         skb->offload_l3_fwd_mark = 1;
2202         skb->offload_fwd_mark = 1;
2203         return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
2204 }
2205
/* Dispatch a trapped PTP packet to the ASIC-generation specific
 * receive handler.
 */
void mlxsw_sp_ptp_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
			  u8 local_port)
{
	mlxsw_sp->ptp_ops->receive(mlxsw_sp, skb, local_port);
}
2211
/* Deliver a sampled packet to the psample group configured on the
 * ingress port, truncating it if requested. The skb is always consumed.
 */
void mlxsw_sp_sample_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
			     u8 local_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
	struct mlxsw_sp_port_sample *sample;
	u32 size;

	if (unlikely(!mlxsw_sp_port)) {
		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received for non-existent port\n",
				     local_port);
		goto out;
	}

	/* The sampling configuration is RCU-protected; it may be torn
	 * down concurrently, in which case the packet is dropped.
	 */
	rcu_read_lock();
	sample = rcu_dereference(mlxsw_sp_port->sample);
	if (!sample)
		goto out_unlock;
	size = sample->truncate ? sample->trunc_size : skb->len;
	psample_sample_packet(sample->psample_group, skb, size,
			      mlxsw_sp_port->dev->ifindex, 0, sample->rate);
out_unlock:
	rcu_read_unlock();
out:
	consume_skb(skb);
}
2237
/* RX listener without offload_fwd_mark: the packet was not forwarded
 * by hardware. Mirror action on trap failure is DISCARD.
 */
#define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl)  \
	MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action, \
		  _is_ctrl, SP_##_trap_group, DISCARD)

/* RX listener that sets offload_fwd_mark (already L2-forwarded). */
#define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl)     \
	MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action,    \
		_is_ctrl, SP_##_trap_group, DISCARD)

/* RX listener that sets both offload_fwd_mark and offload_l3_fwd_mark
 * (already L2- and L3-forwarded).
 */
#define MLXSW_SP_RXL_L3_MARK(_trap_id, _action, _trap_group, _is_ctrl)  \
	MLXSW_RXL(mlxsw_sp_rx_listener_l3_mark_func, _trap_id, _action, \
		_is_ctrl, SP_##_trap_group, DISCARD)

/* Event listener bound to the SP_EVENT trap group. */
#define MLXSW_SP_EVENTL(_func, _trap_id)                \
	MLXSW_EVENTL(_func, _trap_id, SP_EVENT)
2252
/* Traps and events common to all Spectrum generations; registered in
 * bulk by mlxsw_sp_traps_register().
 */
static const struct mlxsw_listener mlxsw_sp_listener[] = {
	/* Events */
	MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func, PUDE),
	/* L2 traps */
	MLXSW_SP_RXL_NO_MARK(FID_MISS, TRAP_TO_CPU, FID_MISS, false),
	/* L3 traps */
	MLXSW_SP_RXL_MARK(IPV6_UNSPECIFIED_ADDRESS, TRAP_TO_CPU, ROUTER_EXP,
			  false),
	MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_SRC, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV6_MC_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP,
			  false),
	MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_CLASS_E, FORWARD,
			     ROUTER_EXP, false),
	MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_MC_DMAC, FORWARD,
			     ROUTER_EXP, false),
	MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_DIP, FORWARD,
			     ROUTER_EXP, false),
	MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_DIP_LINK_LOCAL, FORWARD,
			     ROUTER_EXP, false),
	/* Multicast Router Traps */
	MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false),
	MLXSW_SP_RXL_L3_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false),
	/* NVE traps */
	MLXSW_SP_RXL_MARK(NVE_ENCAP_ARP, TRAP_TO_CPU, NEIGH_DISCOVERY, false),
};
2278
/* Listeners specific to Spectrum-1, which reads PTP timestamps from
 * dedicated ingress/egress FIFOs.
 */
static const struct mlxsw_listener mlxsw_sp1_listener[] = {
	/* Events */
	MLXSW_EVENTL(mlxsw_sp1_ptp_egr_fifo_event_func, PTP_EGR_FIFO, SP_PTP0),
	MLXSW_EVENTL(mlxsw_sp1_ptp_ing_fifo_event_func, PTP_ING_FIFO, SP_PTP0),
};
2284
/* Configure rate policers for the CPU trap groups handled in this file.
 * Only the groups listed in the switch get a policer here; the used
 * policer IDs are recorded in the trap core's usage bitmap. Returns 0
 * or a negative errno.
 */
static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	char qpcr_pl[MLXSW_REG_QPCR_LEN];
	enum mlxsw_reg_qpcr_ir_units ir_units;
	int max_cpu_policers;
	bool is_bytes;
	u8 burst_size;
	u32 rate;
	int i, err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_CPU_POLICERS))
		return -EIO;

	max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);

	ir_units = MLXSW_REG_QPCR_IR_UNITS_M;
	/* Policer ID i is paired with trap group i (see
	 * mlxsw_sp_trap_groups_set()).
	 */
	for (i = 0; i < max_cpu_policers; i++) {
		is_bytes = false;
		switch (i) {
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_FID_MISS:
			rate = 1024;
			burst_size = 7;
			break;
		default:
			continue;
		}

		/* Mark the policer ID as taken so devlink trap policers
		 * do not reuse it.
		 */
		__set_bit(i, mlxsw_sp->trap->policers_usage);
		mlxsw_reg_qpcr_pack(qpcr_pl, i, ir_units, is_bytes, rate,
				    burst_size);
		err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl);
		if (err)
			return err;
	}

	return 0;
}
2325
/* Bind the trap groups handled in this file to a priority, a traffic
 * class and a policer (policer ID equals the group ID, matching
 * mlxsw_sp_cpu_policers_set(); events get no policer). Returns 0 or a
 * negative errno.
 */
static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];
	enum mlxsw_reg_htgt_trap_group i;
	int max_cpu_policers;
	int max_trap_groups;
	u8 priority, tc;
	u16 policer_id;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_TRAP_GROUPS))
		return -EIO;

	max_trap_groups = MLXSW_CORE_RES_GET(mlxsw_core, MAX_TRAP_GROUPS);
	max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);

	for (i = 0; i < max_trap_groups; i++) {
		policer_id = i;
		switch (i) {
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_FID_MISS:
			priority = 1;
			tc = 1;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT:
			priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY;
			tc = MLXSW_REG_HTGT_DEFAULT_TC;
			policer_id = MLXSW_REG_HTGT_INVALID_POLICER;
			break;
		default:
			continue;
		}

		/* A real policer ID must fit the device's policer range. */
		if (max_cpu_policers <= policer_id &&
		    policer_id != MLXSW_REG_HTGT_INVALID_POLICER)
			return -EIO;

		mlxsw_reg_htgt_pack(htgt_pl, i, policer_id, priority, tc);
		err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
		if (err)
			return err;
	}

	return 0;
}
2372
2373 static int mlxsw_sp_traps_register(struct mlxsw_sp *mlxsw_sp,
2374                                    const struct mlxsw_listener listeners[],
2375                                    size_t listeners_count)
2376 {
2377         int i;
2378         int err;
2379
2380         for (i = 0; i < listeners_count; i++) {
2381                 err = mlxsw_core_trap_register(mlxsw_sp->core,
2382                                                &listeners[i],
2383                                                mlxsw_sp);
2384                 if (err)
2385                         goto err_listener_register;
2386
2387         }
2388         return 0;
2389
2390 err_listener_register:
2391         for (i--; i >= 0; i--) {
2392                 mlxsw_core_trap_unregister(mlxsw_sp->core,
2393                                            &listeners[i],
2394                                            mlxsw_sp);
2395         }
2396         return err;
2397 }
2398
2399 static void mlxsw_sp_traps_unregister(struct mlxsw_sp *mlxsw_sp,
2400                                       const struct mlxsw_listener listeners[],
2401                                       size_t listeners_count)
2402 {
2403         int i;
2404
2405         for (i = 0; i < listeners_count; i++) {
2406                 mlxsw_core_trap_unregister(mlxsw_sp->core,
2407                                            &listeners[i],
2408                                            mlxsw_sp);
2409         }
2410 }
2411
/* Allocate the trap state, configure CPU policers and trap groups, and
 * register both the common and the ASIC-specific trap listeners.
 * Returns 0 on success or a negative errno; on failure everything done
 * so far is unwound.
 */
static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_trap *trap;
	u64 max_policers;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_CPU_POLICERS))
		return -EIO;
	max_policers = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_CPU_POLICERS);
	/* The trailing policers_usage bitmap has one bit per CPU policer. */
	trap = kzalloc(struct_size(trap, policers_usage,
				   BITS_TO_LONGS(max_policers)), GFP_KERNEL);
	if (!trap)
		return -ENOMEM;
	trap->max_policers = max_policers;
	mlxsw_sp->trap = trap;

	err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core);
	if (err)
		goto err_cpu_policers_set;

	err = mlxsw_sp_trap_groups_set(mlxsw_sp->core);
	if (err)
		goto err_trap_groups_set;

	/* Listeners common to all Spectrum generations. */
	err = mlxsw_sp_traps_register(mlxsw_sp, mlxsw_sp_listener,
				      ARRAY_SIZE(mlxsw_sp_listener));
	if (err)
		goto err_traps_register;

	/* Per-ASIC listeners assigned by the mlxsw_spX_init() wrappers. */
	err = mlxsw_sp_traps_register(mlxsw_sp, mlxsw_sp->listeners,
				      mlxsw_sp->listeners_count);
	if (err)
		goto err_extra_traps_init;

	return 0;

err_extra_traps_init:
	mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp_listener,
				  ARRAY_SIZE(mlxsw_sp_listener));
err_traps_register:
err_trap_groups_set:
err_cpu_policers_set:
	kfree(trap);
	return err;
}
2457
/* Tear down traps in reverse order of mlxsw_sp_traps_init(): first the
 * per-ASIC listeners, then the common ones, then free the trap state.
 */
static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp->listeners,
				  mlxsw_sp->listeners_count);
	mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp_listener,
				  ARRAY_SIZE(mlxsw_sp_listener));
	kfree(mlxsw_sp->trap);
}
2466
2467 #define MLXSW_SP_LAG_SEED_INIT 0xcafecafe
2468
2469 static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
2470 {
2471         char slcr_pl[MLXSW_REG_SLCR_LEN];
2472         u32 seed;
2473         int err;
2474
2475         seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac),
2476                      MLXSW_SP_LAG_SEED_INIT);
2477         mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
2478                                      MLXSW_REG_SLCR_LAG_HASH_DMAC |
2479                                      MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
2480                                      MLXSW_REG_SLCR_LAG_HASH_VLANID |
2481                                      MLXSW_REG_SLCR_LAG_HASH_SIP |
2482                                      MLXSW_REG_SLCR_LAG_HASH_DIP |
2483                                      MLXSW_REG_SLCR_LAG_HASH_SPORT |
2484                                      MLXSW_REG_SLCR_LAG_HASH_DPORT |
2485                                      MLXSW_REG_SLCR_LAG_HASH_IPPROTO, seed);
2486         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
2487         if (err)
2488                 return err;
2489
2490         if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG) ||
2491             !MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS))
2492                 return -EIO;
2493
2494         mlxsw_sp->lags = kcalloc(MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG),
2495                                  sizeof(struct mlxsw_sp_upper),
2496                                  GFP_KERNEL);
2497         if (!mlxsw_sp->lags)
2498                 return -ENOMEM;
2499
2500         return 0;
2501 }
2502
/* Free the per-LAG tracking array allocated by mlxsw_sp_lag_init(). */
static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp)
{
	kfree(mlxsw_sp->lags);
}
2507
2508 static int mlxsw_sp_basic_trap_groups_set(struct mlxsw_core *mlxsw_core)
2509 {
2510         char htgt_pl[MLXSW_REG_HTGT_LEN];
2511         int err;
2512
2513         mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
2514                             MLXSW_REG_HTGT_INVALID_POLICER,
2515                             MLXSW_REG_HTGT_DEFAULT_PRIORITY,
2516                             MLXSW_REG_HTGT_DEFAULT_TC);
2517         err =  mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
2518         if (err)
2519                 return err;
2520
2521         mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_MFDE,
2522                             MLXSW_REG_HTGT_INVALID_POLICER,
2523                             MLXSW_REG_HTGT_DEFAULT_PRIORITY,
2524                             MLXSW_REG_HTGT_DEFAULT_TC);
2525         err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
2526         if (err)
2527                 return err;
2528
2529         mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_MTWE,
2530                             MLXSW_REG_HTGT_INVALID_POLICER,
2531                             MLXSW_REG_HTGT_DEFAULT_PRIORITY,
2532                             MLXSW_REG_HTGT_DEFAULT_TC);
2533         err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
2534         if (err)
2535                 return err;
2536
2537         mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_PMPE,
2538                             MLXSW_REG_HTGT_INVALID_POLICER,
2539                             MLXSW_REG_HTGT_DEFAULT_PRIORITY,
2540                             MLXSW_REG_HTGT_DEFAULT_TC);
2541         return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
2542 }
2543
/* PTP operation table for Spectrum-1 devices; assigned to
 * mlxsw_sp->ptp_ops by mlxsw_sp1_init().
 */
static const struct mlxsw_sp_ptp_ops mlxsw_sp1_ptp_ops = {
	.clock_init	= mlxsw_sp1_ptp_clock_init,
	.clock_fini	= mlxsw_sp1_ptp_clock_fini,
	.init		= mlxsw_sp1_ptp_init,
	.fini		= mlxsw_sp1_ptp_fini,
	.receive	= mlxsw_sp1_ptp_receive,
	.transmitted	= mlxsw_sp1_ptp_transmitted,
	.hwtstamp_get	= mlxsw_sp1_ptp_hwtstamp_get,
	.hwtstamp_set	= mlxsw_sp1_ptp_hwtstamp_set,
	.shaper_work	= mlxsw_sp1_ptp_shaper_work,
	.get_ts_info	= mlxsw_sp1_ptp_get_ts_info,
	.get_stats_count = mlxsw_sp1_get_stats_count,
	.get_stats_strings = mlxsw_sp1_get_stats_strings,
	.get_stats	= mlxsw_sp1_get_stats,
};
2559
/* PTP operation table for Spectrum-2 and later; assigned to
 * mlxsw_sp->ptp_ops by both mlxsw_sp2_init() and mlxsw_sp3_init().
 */
static const struct mlxsw_sp_ptp_ops mlxsw_sp2_ptp_ops = {
	.clock_init	= mlxsw_sp2_ptp_clock_init,
	.clock_fini	= mlxsw_sp2_ptp_clock_fini,
	.init		= mlxsw_sp2_ptp_init,
	.fini		= mlxsw_sp2_ptp_fini,
	.receive	= mlxsw_sp2_ptp_receive,
	.transmitted	= mlxsw_sp2_ptp_transmitted,
	.hwtstamp_get	= mlxsw_sp2_ptp_hwtstamp_get,
	.hwtstamp_set	= mlxsw_sp2_ptp_hwtstamp_set,
	.shaper_work	= mlxsw_sp2_ptp_shaper_work,
	.get_ts_info	= mlxsw_sp2_ptp_get_ts_info,
	.get_stats_count = mlxsw_sp2_get_stats_count,
	.get_stats_strings = mlxsw_sp2_get_stats_strings,
	.get_stats	= mlxsw_sp2_get_stats,
};
2575
2576 static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
2577                                     unsigned long event, void *ptr);
2578
/* Common initialization shared by all Spectrum generations. Called from
 * the per-ASIC mlxsw_spX_init() wrappers after the generation-specific
 * operation tables have been assigned. Sub-systems are brought up in
 * dependency order; on failure everything already initialized is
 * unwound in reverse via the goto chain at the bottom.
 */
static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
			 const struct mlxsw_bus_info *mlxsw_bus_info,
			 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	int err;

	mlxsw_sp->core = mlxsw_core;
	mlxsw_sp->bus_info = mlxsw_bus_info;

	mlxsw_core_emad_string_tlv_enable(mlxsw_core);

	err = mlxsw_sp_base_mac_get(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
		return err;
	}

	err = mlxsw_sp_kvdl_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize KVDL\n");
		return err;
	}

	err = mlxsw_sp_fids_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize FIDs\n");
		goto err_fids_init;
	}

	/* Policers must exist before traps, which bind to them. */
	err = mlxsw_sp_policers_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize policers\n");
		goto err_policers_init;
	}

	err = mlxsw_sp_traps_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n");
		goto err_traps_init;
	}

	err = mlxsw_sp_devlink_traps_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize devlink traps\n");
		goto err_devlink_traps_init;
	}

	err = mlxsw_sp_buffers_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
		goto err_buffers_init;
	}

	err = mlxsw_sp_lag_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
		goto err_lag_init;
	}

	/* Initialize SPAN before router and switchdev, so that those components
	 * can call mlxsw_sp_span_respin().
	 */
	err = mlxsw_sp_span_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n");
		goto err_span_init;
	}

	err = mlxsw_sp_switchdev_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
		goto err_switchdev_init;
	}

	err = mlxsw_sp_counter_pool_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n");
		goto err_counter_pool_init;
	}

	err = mlxsw_sp_afa_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL actions\n");
		goto err_afa_init;
	}

	err = mlxsw_sp_nve_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize NVE\n");
		goto err_nve_init;
	}

	err = mlxsw_sp_acl_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n");
		goto err_acl_init;
	}

	err = mlxsw_sp_router_init(mlxsw_sp, extack);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
		goto err_router_init;
	}

	/* The PTP clock is only set up on buses that can read the
	 * free-running counter.
	 */
	if (mlxsw_sp->bus_info->read_frc_capable) {
		/* NULL is a valid return value from clock_init */
		mlxsw_sp->clock =
			mlxsw_sp->ptp_ops->clock_init(mlxsw_sp,
						      mlxsw_sp->bus_info->dev);
		if (IS_ERR(mlxsw_sp->clock)) {
			err = PTR_ERR(mlxsw_sp->clock);
			dev_err(mlxsw_sp->bus_info->dev, "Failed to init ptp clock\n");
			goto err_ptp_clock_init;
		}
	}

	if (mlxsw_sp->clock) {
		/* NULL is a valid return value from ptp_ops->init */
		mlxsw_sp->ptp_state = mlxsw_sp->ptp_ops->init(mlxsw_sp);
		if (IS_ERR(mlxsw_sp->ptp_state)) {
			err = PTR_ERR(mlxsw_sp->ptp_state);
			dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize PTP\n");
			goto err_ptp_init;
		}
	}

	/* Initialize netdevice notifier after router and SPAN is initialized,
	 * so that the event handler can use router structures and call SPAN
	 * respin.
	 */
	mlxsw_sp->netdevice_nb.notifier_call = mlxsw_sp_netdevice_event;
	err = register_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
					      &mlxsw_sp->netdevice_nb);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to register netdev notifier\n");
		goto err_netdev_notifier;
	}

	err = mlxsw_sp_dpipe_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init pipeline debug\n");
		goto err_dpipe_init;
	}

	err = mlxsw_sp_port_module_info_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init port module info\n");
		goto err_port_module_info_init;
	}

	/* Ports come up last, once all supporting infrastructure exists. */
	err = mlxsw_sp_ports_create(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
		goto err_ports_create;
	}

	return 0;

err_ports_create:
	mlxsw_sp_port_module_info_fini(mlxsw_sp);
err_port_module_info_init:
	mlxsw_sp_dpipe_fini(mlxsw_sp);
err_dpipe_init:
	unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
					  &mlxsw_sp->netdevice_nb);
err_netdev_notifier:
	if (mlxsw_sp->clock)
		mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state);
err_ptp_init:
	if (mlxsw_sp->clock)
		mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock);
err_ptp_clock_init:
	mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
	mlxsw_sp_acl_fini(mlxsw_sp);
err_acl_init:
	mlxsw_sp_nve_fini(mlxsw_sp);
err_nve_init:
	mlxsw_sp_afa_fini(mlxsw_sp);
err_afa_init:
	mlxsw_sp_counter_pool_fini(mlxsw_sp);
err_counter_pool_init:
	mlxsw_sp_switchdev_fini(mlxsw_sp);
err_switchdev_init:
	mlxsw_sp_span_fini(mlxsw_sp);
err_span_init:
	mlxsw_sp_lag_fini(mlxsw_sp);
err_lag_init:
	mlxsw_sp_buffers_fini(mlxsw_sp);
err_buffers_init:
	mlxsw_sp_devlink_traps_fini(mlxsw_sp);
err_devlink_traps_init:
	mlxsw_sp_traps_fini(mlxsw_sp);
err_traps_init:
	mlxsw_sp_policers_fini(mlxsw_sp);
err_policers_init:
	mlxsw_sp_fids_fini(mlxsw_sp);
err_fids_init:
	mlxsw_sp_kvdl_fini(mlxsw_sp);
	return err;
}
2781
/* Spectrum-1 probe entry point: wire up the SP1-specific operation
 * tables and extra trap listeners, then run the common initialization.
 */
static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info,
			  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp->kvdl_ops = &mlxsw_sp1_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp1_act_afa_ops;
	mlxsw_sp->afk_ops = &mlxsw_sp1_afk_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp1_mr_tcam_ops;
	mlxsw_sp->acl_rulei_ops = &mlxsw_sp1_acl_rulei_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp1_acl_tcam_ops;
	mlxsw_sp->nve_ops_arr = mlxsw_sp1_nve_ops_arr;
	mlxsw_sp->mac_mask = mlxsw_sp1_mac_mask;
	mlxsw_sp->rif_ops_arr = mlxsw_sp1_rif_ops_arr;
	mlxsw_sp->sb_vals = &mlxsw_sp1_sb_vals;
	mlxsw_sp->sb_ops = &mlxsw_sp1_sb_ops;
	mlxsw_sp->port_type_speed_ops = &mlxsw_sp1_port_type_speed_ops;
	mlxsw_sp->ptp_ops = &mlxsw_sp1_ptp_ops;
	mlxsw_sp->span_ops = &mlxsw_sp1_span_ops;
	mlxsw_sp->policer_core_ops = &mlxsw_sp1_policer_core_ops;
	mlxsw_sp->trap_ops = &mlxsw_sp1_trap_ops;
	/* SP1-only listeners, registered by mlxsw_sp_traps_init(). */
	mlxsw_sp->listeners = mlxsw_sp1_listener;
	mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp1_listener);
	mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP1;

	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}
2810
/* Spectrum-2 probe entry point: wire up the SP2-specific operation
 * tables, then run the common initialization. No extra listener array
 * is assigned, unlike Spectrum-1.
 */
static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info,
			  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
	mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
	mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
	mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
	mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
	mlxsw_sp->rif_ops_arr = mlxsw_sp2_rif_ops_arr;
	mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
	mlxsw_sp->sb_ops = &mlxsw_sp2_sb_ops;
	mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
	mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
	mlxsw_sp->span_ops = &mlxsw_sp2_span_ops;
	mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops;
	mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
	mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP2;

	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}
2837
/* Spectrum-3 probe entry point. Reuses most of the Spectrum-2 operation
 * tables; only the shared-buffer ops, SPAN ops and lowest shaper burst
 * size are SP3-specific.
 */
static int mlxsw_sp3_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info,
			  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
	mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
	mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
	mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
	mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
	mlxsw_sp->rif_ops_arr = mlxsw_sp2_rif_ops_arr;
	mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
	mlxsw_sp->sb_ops = &mlxsw_sp3_sb_ops;
	mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
	mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
	mlxsw_sp->span_ops = &mlxsw_sp3_span_ops;
	mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops;
	mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
	mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3;

	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}
2864
/* Driver removal path: tear down all sub-systems in exactly the reverse
 * order of mlxsw_sp_init().
 */
static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp_ports_remove(mlxsw_sp);
	mlxsw_sp_port_module_info_fini(mlxsw_sp);
	mlxsw_sp_dpipe_fini(mlxsw_sp);
	unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
					  &mlxsw_sp->netdevice_nb);
	/* PTP state exists only if a clock was created during init. */
	if (mlxsw_sp->clock) {
		mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state);
		mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock);
	}
	mlxsw_sp_router_fini(mlxsw_sp);
	mlxsw_sp_acl_fini(mlxsw_sp);
	mlxsw_sp_nve_fini(mlxsw_sp);
	mlxsw_sp_afa_fini(mlxsw_sp);
	mlxsw_sp_counter_pool_fini(mlxsw_sp);
	mlxsw_sp_switchdev_fini(mlxsw_sp);
	mlxsw_sp_span_fini(mlxsw_sp);
	mlxsw_sp_lag_fini(mlxsw_sp);
	mlxsw_sp_buffers_fini(mlxsw_sp);
	mlxsw_sp_devlink_traps_fini(mlxsw_sp);
	mlxsw_sp_traps_fini(mlxsw_sp);
	mlxsw_sp_policers_fini(mlxsw_sp);
	mlxsw_sp_fids_fini(mlxsw_sp);
	mlxsw_sp_kvdl_fini(mlxsw_sp);
}
2893
2894 /* Per-FID flood tables are used for both "true" 802.1D FIDs and emulated
2895  * 802.1Q FIDs
2896  */
2897 #define MLXSW_SP_FID_FLOOD_TABLE_SIZE   (MLXSW_SP_FID_8021D_MAX + \
2898                                          VLAN_VID_MASK - 1)
2899
/* Device configuration profile passed to the core for Spectrum-1.
 * IB multicast and pkey limits are explicitly set to zero; the KVD hash
 * space is split 59/41 between single and double entries.
 */
static const struct mlxsw_config_profile mlxsw_sp1_config_profile = {
	.used_max_mid			= 1,
	.max_mid			= MLXSW_SP_MID_MAX,
	.used_flood_tables		= 1,
	.used_flood_mode		= 1,
	.flood_mode			= 3,
	.max_fid_flood_tables		= 3,
	.fid_flood_table_size		= MLXSW_SP_FID_FLOOD_TABLE_SIZE,
	.used_max_ib_mc			= 1,
	.max_ib_mc			= 0,
	.used_max_pkey			= 1,
	.max_pkey			= 0,
	.used_kvd_sizes			= 1,
	.kvd_hash_single_parts		= 59,
	.kvd_hash_double_parts		= 41,
	.kvd_linear_size		= MLXSW_SP_KVD_LINEAR_SIZE,
	.swid_config			= {
		{
			.used_type	= 1,
			.type		= MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
};
2923
/* Device configuration profile for Spectrum-2 and later. Matches the
 * Spectrum-1 profile except that no KVD partition sizes are set
 * (used_kvd_sizes is absent).
 */
static const struct mlxsw_config_profile mlxsw_sp2_config_profile = {
	.used_max_mid			= 1,
	.max_mid			= MLXSW_SP_MID_MAX,
	.used_flood_tables		= 1,
	.used_flood_mode		= 1,
	.flood_mode			= 3,
	.max_fid_flood_tables		= 3,
	.fid_flood_table_size		= MLXSW_SP_FID_FLOOD_TABLE_SIZE,
	.used_max_ib_mc			= 1,
	.max_ib_mc			= 0,
	.used_max_pkey			= 1,
	.max_pkey			= 0,
	.swid_config			= {
		{
			.used_type	= 1,
			.type		= MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
};
2943
/* Build devlink size parameters for the KVD and its three partitions
 * (linear, hash-single, hash-double). The total KVD size is fixed
 * (min == max == kvd_size); each partition may range from its minimum
 * up to the total minus the minimums of the other two partitions.
 */
static void
mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core,
				      struct devlink_resource_size_params *kvd_size_params,
				      struct devlink_resource_size_params *linear_size_params,
				      struct devlink_resource_size_params *hash_double_size_params,
				      struct devlink_resource_size_params *hash_single_size_params)
{
	u32 single_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
						 KVD_SINGLE_MIN_SIZE);
	u32 double_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
						 KVD_DOUBLE_MIN_SIZE);
	u32 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
	/* The linear partition may be empty. */
	u32 linear_size_min = 0;

	devlink_resource_size_params_init(kvd_size_params, kvd_size, kvd_size,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	devlink_resource_size_params_init(linear_size_params, linear_size_min,
					  kvd_size - single_size_min -
					  double_size_min,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	devlink_resource_size_params_init(hash_double_size_params,
					  double_size_min,
					  kvd_size - single_size_min -
					  linear_size_min,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	devlink_resource_size_params_init(hash_single_size_params,
					  single_size_min,
					  kvd_size - double_size_min -
					  linear_size_min,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
}
2979
2980 static int mlxsw_sp1_resources_kvd_register(struct mlxsw_core *mlxsw_core)
2981 {
2982         struct devlink *devlink = priv_to_devlink(mlxsw_core);
2983         struct devlink_resource_size_params hash_single_size_params;
2984         struct devlink_resource_size_params hash_double_size_params;
2985         struct devlink_resource_size_params linear_size_params;
2986         struct devlink_resource_size_params kvd_size_params;
2987         u32 kvd_size, single_size, double_size, linear_size;
2988         const struct mlxsw_config_profile *profile;
2989         int err;
2990
2991         profile = &mlxsw_sp1_config_profile;
2992         if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
2993                 return -EIO;
2994
2995         mlxsw_sp_resource_size_params_prepare(mlxsw_core, &kvd_size_params,
2996                                               &linear_size_params,
2997                                               &hash_double_size_params,
2998                                               &hash_single_size_params);
2999
3000         kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
3001         err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
3002                                         kvd_size, MLXSW_SP_RESOURCE_KVD,
3003                                         DEVLINK_RESOURCE_ID_PARENT_TOP,
3004                                         &kvd_size_params);
3005         if (err)
3006                 return err;
3007
3008         linear_size = profile->kvd_linear_size;
3009         err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR,
3010                                         linear_size,
3011                                         MLXSW_SP_RESOURCE_KVD_LINEAR,
3012                                         MLXSW_SP_RESOURCE_KVD,
3013                                         &linear_size_params);
3014         if (err)
3015                 return err;
3016
3017         err = mlxsw_sp1_kvdl_resources_register(mlxsw_core);
3018         if  (err)
3019                 return err;
3020
3021         double_size = kvd_size - linear_size;
3022         double_size *= profile->kvd_hash_double_parts;
3023         double_size /= profile->kvd_hash_double_parts +
3024                        profile->kvd_hash_single_parts;
3025         double_size = rounddown(double_size, MLXSW_SP_KVD_GRANULARITY);
3026         err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE,
3027                                         double_size,
3028                                         MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
3029                                         MLXSW_SP_RESOURCE_KVD,
3030                                         &hash_double_size_params);
3031         if (err)
3032                 return err;
3033
3034         single_size = kvd_size - double_size - linear_size;
3035         err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE,
3036                                         single_size,
3037                                         MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
3038                                         MLXSW_SP_RESOURCE_KVD,
3039                                         &hash_single_size_params);
3040         if (err)
3041                 return err;
3042
3043         return 0;
3044 }
3045
/* Register the KVD memory with devlink as a single, fixed-size resource.
 * On Spectrum-2 the whole KVD is exposed as one entry-granular resource
 * with no child partitions (unlike Spectrum-1, which also registers
 * linear/hash-single/hash-double children).
 */
static int mlxsw_sp2_resources_kvd_register(struct mlxsw_core *mlxsw_core)
{
        struct devlink *devlink = priv_to_devlink(mlxsw_core);
        struct devlink_resource_size_params kvd_size_params;
        u32 kvd_size;

        /* The device must report its KVD size; otherwise nothing can be
         * registered.
         */
        if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
                return -EIO;

        kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
        /* min == max == kvd_size, so the size is not user-adjustable. */
        devlink_resource_size_params_init(&kvd_size_params, kvd_size, kvd_size,
                                          MLXSW_SP_KVD_GRANULARITY,
                                          DEVLINK_RESOURCE_UNIT_ENTRY);

        return devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
                                         kvd_size, MLXSW_SP_RESOURCE_KVD,
                                         DEVLINK_RESOURCE_ID_PARENT_TOP,
                                         &kvd_size_params);
}
3065
/* Register the number of SPAN (mirroring) agents as a fixed-size devlink
 * resource, taken from the device's MAX_SPAN capability.
 */
static int mlxsw_sp_resources_span_register(struct mlxsw_core *mlxsw_core)
{
        struct devlink *devlink = priv_to_devlink(mlxsw_core);
        struct devlink_resource_size_params span_size_params;
        u32 max_span;

        if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_SPAN))
                return -EIO;

        max_span = MLXSW_CORE_RES_GET(mlxsw_core, MAX_SPAN);
        /* min == max == max_span: the resource size is fixed. */
        devlink_resource_size_params_init(&span_size_params, max_span, max_span,
                                          1, DEVLINK_RESOURCE_UNIT_ENTRY);

        return devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_SPAN,
                                         max_span, MLXSW_SP_RESOURCE_SPAN,
                                         DEVLINK_RESOURCE_ID_PARENT_TOP,
                                         &span_size_params);
}
3084
/* Register all Spectrum-1 devlink resources: KVD (with its partitions),
 * SPAN agents, flow counters and policers. On any failure after the first
 * registration, everything registered so far is torn down in one shot.
 */
static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core)
{
        int err;

        err = mlxsw_sp1_resources_kvd_register(mlxsw_core);
        if (err)
                return err;

        err = mlxsw_sp_resources_span_register(mlxsw_core);
        if (err)
                goto err_resources_span_register;

        err = mlxsw_sp_counter_resources_register(mlxsw_core);
        if (err)
                goto err_resources_counter_register;

        err = mlxsw_sp_policer_resources_register(mlxsw_core);
        if (err)
                goto err_resources_counter_register;

        return 0;

/* Both labels fall through to the same cleanup: passing NULL to
 * devlink_resources_unregister() removes all resources registered on
 * this devlink instance, so no per-step unwinding is needed.
 */
err_resources_counter_register:
err_resources_span_register:
        devlink_resources_unregister(priv_to_devlink(mlxsw_core), NULL);
        return err;
}
3112
/* Register all Spectrum-2 devlink resources. Identical sequence to the
 * Spectrum-1 variant except for the KVD registration, which exposes a
 * single unpartitioned resource on this ASIC generation.
 */
static int mlxsw_sp2_resources_register(struct mlxsw_core *mlxsw_core)
{
        int err;

        err = mlxsw_sp2_resources_kvd_register(mlxsw_core);
        if (err)
                return err;

        err = mlxsw_sp_resources_span_register(mlxsw_core);
        if (err)
                goto err_resources_span_register;

        err = mlxsw_sp_counter_resources_register(mlxsw_core);
        if (err)
                goto err_resources_counter_register;

        err = mlxsw_sp_policer_resources_register(mlxsw_core);
        if (err)
                goto err_resources_counter_register;

        return 0;

/* Shared cleanup: NULL unregisters every resource on this devlink. */
err_resources_counter_register:
err_resources_span_register:
        devlink_resources_unregister(priv_to_devlink(mlxsw_core), NULL);
        return err;
}
3140
/* Determine the sizes of the three KVD partitions (linear, hash-single,
 * hash-double). User-configured sizes are read back from devlink; when a
 * devlink size is not available, the value is derived from the config
 * profile instead. Returns -EIO if the device lacks the minimum-size
 * capabilities or if the resulting split violates them.
 */
static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
                                  const struct mlxsw_config_profile *profile,
                                  u64 *p_single_size, u64 *p_double_size,
                                  u64 *p_linear_size)
{
        struct devlink *devlink = priv_to_devlink(mlxsw_core);
        u32 double_size;
        int err;

        if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
            !MLXSW_CORE_RES_VALID(mlxsw_core, KVD_DOUBLE_MIN_SIZE))
                return -EIO;

        /* The hash part is what is left of the KVD without the
         * linear part. It is split into the single size and
         * double size by the parts ratio from the profile.
         * Both sizes must be multiples of the granularity from
         * the profile. In case the user provided the sizes, they
         * are obtained via devlink.
         */
        err = devlink_resource_size_get(devlink,
                                        MLXSW_SP_RESOURCE_KVD_LINEAR,
                                        p_linear_size);
        if (err)
                *p_linear_size = profile->kvd_linear_size;

        err = devlink_resource_size_get(devlink,
                                        MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
                                        p_double_size);
        if (err) {
                /* No devlink value: split the remaining hash area by the
                 * double:single parts ratio, rounded down to granularity.
                 */
                double_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
                              *p_linear_size;
                double_size *= profile->kvd_hash_double_parts;
                double_size /= profile->kvd_hash_double_parts +
                               profile->kvd_hash_single_parts;
                *p_double_size = rounddown(double_size,
                                           MLXSW_SP_KVD_GRANULARITY);
        }

        err = devlink_resource_size_get(devlink,
                                        MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
                                        p_single_size);
        if (err)
                /* Single takes whatever is left after linear and double. */
                *p_single_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
                                 *p_double_size - *p_linear_size;

        /* Check results are legal. */
        if (*p_single_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
            *p_double_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE) ||
            MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) < *p_linear_size)
                return -EIO;

        return 0;
}
3195
3196 static int
3197 mlxsw_sp_params_acl_region_rehash_intrvl_get(struct devlink *devlink, u32 id,
3198                                              struct devlink_param_gset_ctx *ctx)
3199 {
3200         struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
3201         struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
3202
3203         ctx->val.vu32 = mlxsw_sp_acl_region_rehash_intrvl_get(mlxsw_sp);
3204         return 0;
3205 }
3206
3207 static int
3208 mlxsw_sp_params_acl_region_rehash_intrvl_set(struct devlink *devlink, u32 id,
3209                                              struct devlink_param_gset_ctx *ctx)
3210 {
3211         struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
3212         struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
3213
3214         return mlxsw_sp_acl_region_rehash_intrvl_set(mlxsw_sp, ctx->val.vu32);
3215 }
3216
/* Driver-specific devlink parameters for Spectrum-2 and later: a single
 * runtime-configurable u32 controlling the ACL region rehash interval.
 */
static const struct devlink_param mlxsw_sp2_devlink_params[] = {
        DEVLINK_PARAM_DRIVER(MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL,
                             "acl_region_rehash_interval",
                             DEVLINK_PARAM_TYPE_U32,
                             BIT(DEVLINK_PARAM_CMODE_RUNTIME),
                             mlxsw_sp_params_acl_region_rehash_intrvl_get,
                             mlxsw_sp_params_acl_region_rehash_intrvl_set,
                             NULL),
};
3226
/* Register the Spectrum-2 devlink parameters and seed the rehash-interval
 * parameter's driverinit value with 0.
 */
static int mlxsw_sp2_params_register(struct mlxsw_core *mlxsw_core)
{
        struct devlink *devlink = priv_to_devlink(mlxsw_core);
        union devlink_param_value value;
        int err;

        err = devlink_params_register(devlink, mlxsw_sp2_devlink_params,
                                      ARRAY_SIZE(mlxsw_sp2_devlink_params));
        if (err)
                return err;

        value.vu32 = 0;
        devlink_param_driverinit_value_set(devlink,
                                           MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL,
                                           value);
        return 0;
}
3244
/* Undo mlxsw_sp2_params_register(): remove the driver's devlink params. */
static void mlxsw_sp2_params_unregister(struct mlxsw_core *mlxsw_core)
{
        devlink_params_unregister(priv_to_devlink(mlxsw_core),
                                  mlxsw_sp2_devlink_params,
                                  ARRAY_SIZE(mlxsw_sp2_devlink_params));
}
3251
/* mlxsw core "ptp_transmitted" hook: strip the Tx header that was
 * prepended for the device and forward the skb to the per-ASIC PTP
 * implementation's transmitted() handler.
 */
static void mlxsw_sp_ptp_transmitted(struct mlxsw_core *mlxsw_core,
                                     struct sk_buff *skb, u8 local_port)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

        skb_pull(skb, MLXSW_TXHDR_LEN);
        mlxsw_sp->ptp_ops->transmitted(mlxsw_sp, skb, local_port);
}
3260
/* mlxsw core driver description for Spectrum-1. Notably, Spectrum-1 is
 * the only generation here that uses partitioned KVD resources
 * (mlxsw_sp1_resources_register + kvd_sizes_get) and has no devlink
 * params callbacks.
 */
static struct mlxsw_driver mlxsw_sp1_driver = {
        .kind                           = mlxsw_sp1_driver_name,
        .priv_size                      = sizeof(struct mlxsw_sp),
        .fw_req_rev                     = &mlxsw_sp1_fw_rev,
        .fw_filename                    = MLXSW_SP1_FW_FILENAME,
        .init                           = mlxsw_sp1_init,
        .fini                           = mlxsw_sp_fini,
        .basic_trap_groups_set          = mlxsw_sp_basic_trap_groups_set,
        .port_split                     = mlxsw_sp_port_split,
        .port_unsplit                   = mlxsw_sp_port_unsplit,
        .sb_pool_get                    = mlxsw_sp_sb_pool_get,
        .sb_pool_set                    = mlxsw_sp_sb_pool_set,
        .sb_port_pool_get               = mlxsw_sp_sb_port_pool_get,
        .sb_port_pool_set               = mlxsw_sp_sb_port_pool_set,
        .sb_tc_pool_bind_get            = mlxsw_sp_sb_tc_pool_bind_get,
        .sb_tc_pool_bind_set            = mlxsw_sp_sb_tc_pool_bind_set,
        .sb_occ_snapshot                = mlxsw_sp_sb_occ_snapshot,
        .sb_occ_max_clear               = mlxsw_sp_sb_occ_max_clear,
        .sb_occ_port_pool_get           = mlxsw_sp_sb_occ_port_pool_get,
        .sb_occ_tc_port_bind_get        = mlxsw_sp_sb_occ_tc_port_bind_get,
        .trap_init                      = mlxsw_sp_trap_init,
        .trap_fini                      = mlxsw_sp_trap_fini,
        .trap_action_set                = mlxsw_sp_trap_action_set,
        .trap_group_init                = mlxsw_sp_trap_group_init,
        .trap_group_set                 = mlxsw_sp_trap_group_set,
        .trap_policer_init              = mlxsw_sp_trap_policer_init,
        .trap_policer_fini              = mlxsw_sp_trap_policer_fini,
        .trap_policer_set               = mlxsw_sp_trap_policer_set,
        .trap_policer_counter_get       = mlxsw_sp_trap_policer_counter_get,
        .txhdr_construct                = mlxsw_sp_txhdr_construct,
        .resources_register             = mlxsw_sp1_resources_register,
        .kvd_sizes_get                  = mlxsw_sp_kvd_sizes_get,
        .ptp_transmitted                = mlxsw_sp_ptp_transmitted,
        .txhdr_len                      = MLXSW_TXHDR_LEN,
        .profile                        = &mlxsw_sp1_config_profile,
        .res_query_enabled              = true,
        .fw_fatal_enabled               = true,
        .temp_warn_enabled              = true,
};
3300
/* mlxsw core driver description for Spectrum-2. Differs from Spectrum-1
 * in its init/fw/profile hooks, unpartitioned KVD resources (no
 * kvd_sizes_get) and the devlink params (ACL rehash interval) callbacks.
 */
static struct mlxsw_driver mlxsw_sp2_driver = {
        .kind                           = mlxsw_sp2_driver_name,
        .priv_size                      = sizeof(struct mlxsw_sp),
        .fw_req_rev                     = &mlxsw_sp2_fw_rev,
        .fw_filename                    = MLXSW_SP2_FW_FILENAME,
        .init                           = mlxsw_sp2_init,
        .fini                           = mlxsw_sp_fini,
        .basic_trap_groups_set          = mlxsw_sp_basic_trap_groups_set,
        .port_split                     = mlxsw_sp_port_split,
        .port_unsplit                   = mlxsw_sp_port_unsplit,
        .sb_pool_get                    = mlxsw_sp_sb_pool_get,
        .sb_pool_set                    = mlxsw_sp_sb_pool_set,
        .sb_port_pool_get               = mlxsw_sp_sb_port_pool_get,
        .sb_port_pool_set               = mlxsw_sp_sb_port_pool_set,
        .sb_tc_pool_bind_get            = mlxsw_sp_sb_tc_pool_bind_get,
        .sb_tc_pool_bind_set            = mlxsw_sp_sb_tc_pool_bind_set,
        .sb_occ_snapshot                = mlxsw_sp_sb_occ_snapshot,
        .sb_occ_max_clear               = mlxsw_sp_sb_occ_max_clear,
        .sb_occ_port_pool_get           = mlxsw_sp_sb_occ_port_pool_get,
        .sb_occ_tc_port_bind_get        = mlxsw_sp_sb_occ_tc_port_bind_get,
        .trap_init                      = mlxsw_sp_trap_init,
        .trap_fini                      = mlxsw_sp_trap_fini,
        .trap_action_set                = mlxsw_sp_trap_action_set,
        .trap_group_init                = mlxsw_sp_trap_group_init,
        .trap_group_set                 = mlxsw_sp_trap_group_set,
        .trap_policer_init              = mlxsw_sp_trap_policer_init,
        .trap_policer_fini              = mlxsw_sp_trap_policer_fini,
        .trap_policer_set               = mlxsw_sp_trap_policer_set,
        .trap_policer_counter_get       = mlxsw_sp_trap_policer_counter_get,
        .txhdr_construct                = mlxsw_sp_txhdr_construct,
        .resources_register             = mlxsw_sp2_resources_register,
        .params_register                = mlxsw_sp2_params_register,
        .params_unregister              = mlxsw_sp2_params_unregister,
        .ptp_transmitted                = mlxsw_sp_ptp_transmitted,
        .txhdr_len                      = MLXSW_TXHDR_LEN,
        .profile                        = &mlxsw_sp2_config_profile,
        .res_query_enabled              = true,
        .fw_fatal_enabled               = true,
        .temp_warn_enabled              = true,
};
3341
/* mlxsw core driver description for Spectrum-3. Shares the Spectrum-2
 * resource/params callbacks and config profile; only the kind, firmware
 * identity and init hook are generation-specific.
 */
static struct mlxsw_driver mlxsw_sp3_driver = {
        .kind                           = mlxsw_sp3_driver_name,
        .priv_size                      = sizeof(struct mlxsw_sp),
        .fw_req_rev                     = &mlxsw_sp3_fw_rev,
        .fw_filename                    = MLXSW_SP3_FW_FILENAME,
        .init                           = mlxsw_sp3_init,
        .fini                           = mlxsw_sp_fini,
        .basic_trap_groups_set          = mlxsw_sp_basic_trap_groups_set,
        .port_split                     = mlxsw_sp_port_split,
        .port_unsplit                   = mlxsw_sp_port_unsplit,
        .sb_pool_get                    = mlxsw_sp_sb_pool_get,
        .sb_pool_set                    = mlxsw_sp_sb_pool_set,
        .sb_port_pool_get               = mlxsw_sp_sb_port_pool_get,
        .sb_port_pool_set               = mlxsw_sp_sb_port_pool_set,
        .sb_tc_pool_bind_get            = mlxsw_sp_sb_tc_pool_bind_get,
        .sb_tc_pool_bind_set            = mlxsw_sp_sb_tc_pool_bind_set,
        .sb_occ_snapshot                = mlxsw_sp_sb_occ_snapshot,
        .sb_occ_max_clear               = mlxsw_sp_sb_occ_max_clear,
        .sb_occ_port_pool_get           = mlxsw_sp_sb_occ_port_pool_get,
        .sb_occ_tc_port_bind_get        = mlxsw_sp_sb_occ_tc_port_bind_get,
        .trap_init                      = mlxsw_sp_trap_init,
        .trap_fini                      = mlxsw_sp_trap_fini,
        .trap_action_set                = mlxsw_sp_trap_action_set,
        .trap_group_init                = mlxsw_sp_trap_group_init,
        .trap_group_set                 = mlxsw_sp_trap_group_set,
        .trap_policer_init              = mlxsw_sp_trap_policer_init,
        .trap_policer_fini              = mlxsw_sp_trap_policer_fini,
        .trap_policer_set               = mlxsw_sp_trap_policer_set,
        .trap_policer_counter_get       = mlxsw_sp_trap_policer_counter_get,
        .txhdr_construct                = mlxsw_sp_txhdr_construct,
        .resources_register             = mlxsw_sp2_resources_register,
        .params_register                = mlxsw_sp2_params_register,
        .params_unregister              = mlxsw_sp2_params_unregister,
        .ptp_transmitted                = mlxsw_sp_ptp_transmitted,
        .txhdr_len                      = MLXSW_TXHDR_LEN,
        .profile                        = &mlxsw_sp2_config_profile,
        .res_query_enabled              = true,
        .fw_fatal_enabled               = true,
        .temp_warn_enabled              = true,
};
3382
/* Return true if @dev is a mlxsw_sp front-panel port netdev, identified
 * by comparing its netdev_ops against this driver's ops table.
 */
bool mlxsw_sp_port_dev_check(const struct net_device *dev)
{
        return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
}
3387
3388 static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev,
3389                                    struct netdev_nested_priv *priv)
3390 {
3391         int ret = 0;
3392
3393         if (mlxsw_sp_port_dev_check(lower_dev)) {
3394                 priv->data = (void *)netdev_priv(lower_dev);
3395                 ret = 1;
3396         }
3397
3398         return ret;
3399 }
3400
3401 struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
3402 {
3403         struct netdev_nested_priv priv = {
3404                 .data = NULL,
3405         };
3406
3407         if (mlxsw_sp_port_dev_check(dev))
3408                 return netdev_priv(dev);
3409
3410         netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &priv);
3411
3412         return (struct mlxsw_sp_port *)priv.data;
3413 }
3414
3415 struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
3416 {
3417         struct mlxsw_sp_port *mlxsw_sp_port;
3418
3419         mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
3420         return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL;
3421 }
3422
3423 struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
3424 {
3425         struct netdev_nested_priv priv = {
3426                 .data = NULL,
3427         };
3428
3429         if (mlxsw_sp_port_dev_check(dev))
3430                 return netdev_priv(dev);
3431
3432         netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk,
3433                                       &priv);
3434
3435         return (struct mlxsw_sp_port *)priv.data;
3436 }
3437
/* Find the mlxsw_sp port under @dev and take a reference on its netdev
 * before leaving the RCU read-side section, so the port's netdev stays
 * valid for the caller. Balance with mlxsw_sp_port_dev_put().
 */
struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev)
{
        struct mlxsw_sp_port *mlxsw_sp_port;

        rcu_read_lock();
        mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev);
        if (mlxsw_sp_port)
                dev_hold(mlxsw_sp_port->dev);
        rcu_read_unlock();
        return mlxsw_sp_port;
}
3449
/* Drop the netdev reference taken by mlxsw_sp_port_lower_dev_hold(). */
void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port)
{
        dev_put(mlxsw_sp_port->dev);
}
3454
/* On behalf of @mlxsw_sp_port (a member leaving the LAG), make @lag_dev
 * leave its bridge (if it is a bridge port) and likewise detach every
 * bridge-port upper directly stacked on top of the LAG.
 */
static void
mlxsw_sp_port_lag_uppers_cleanup(struct mlxsw_sp_port *mlxsw_sp_port,
                                 struct net_device *lag_dev)
{
        struct net_device *br_dev = netdev_master_upper_dev_get(lag_dev);
        struct net_device *upper_dev;
        struct list_head *iter;

        if (netif_is_bridge_port(lag_dev))
                mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lag_dev, br_dev);

        /* Uppers that are not bridge ports (e.g. VLAN devices without a
         * bridge master) need no bridge-leave handling here.
         */
        netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
                if (!netif_is_bridge_port(upper_dev))
                        continue;
                br_dev = netdev_master_upper_dev_get(upper_dev);
                mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, br_dev);
        }
}
3473
/* Create LAG @lag_id in hardware via the SLDR register. */
static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
        char sldr_pl[MLXSW_REG_SLDR_LEN];

        mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}
3481
/* Destroy LAG @lag_id in hardware via the SLDR register. */
static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
        char sldr_pl[MLXSW_REG_SLDR_LEN];

        mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}
3489
/* Add the port to LAG @lag_id's collector at @port_index via SLCOR. */
static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
                                     u16 lag_id, u8 port_index)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char slcor_pl[MLXSW_REG_SLCOR_LEN];

        mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
                                      lag_id, port_index);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}
3500
/* Remove the port from LAG @lag_id's collector via SLCOR. */
static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
                                        u16 lag_id)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char slcor_pl[MLXSW_REG_SLCOR_LEN];

        mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
                                         lag_id);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}
3511
/* Enable collection on the port for LAG @lag_id via SLCOR. */
static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
                                        u16 lag_id)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char slcor_pl[MLXSW_REG_SLCOR_LEN];

        mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
                                        lag_id);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}
3522
/* Disable collection on the port for LAG @lag_id via SLCOR. */
static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
                                         u16 lag_id)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char slcor_pl[MLXSW_REG_SLCOR_LEN];

        mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
                                         lag_id);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}
3533
3534 static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
3535                                   struct net_device *lag_dev,
3536                                   u16 *p_lag_id)
3537 {
3538         struct mlxsw_sp_upper *lag;
3539         int free_lag_id = -1;
3540         u64 max_lag;
3541         int i;
3542
3543         max_lag = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG);
3544         for (i = 0; i < max_lag; i++) {
3545                 lag = mlxsw_sp_lag_get(mlxsw_sp, i);
3546                 if (lag->ref_count) {
3547                         if (lag->dev == lag_dev) {
3548                                 *p_lag_id = i;
3549                                 return 0;
3550                         }
3551                 } else if (free_lag_id < 0) {
3552                         free_lag_id = i;
3553                 }
3554         }
3555         if (free_lag_id < 0)
3556                 return -EBUSY;
3557         *p_lag_id = free_lag_id;
3558         return 0;
3559 }
3560
3561 static bool
3562 mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
3563                           struct net_device *lag_dev,
3564                           struct netdev_lag_upper_info *lag_upper_info,
3565                           struct netlink_ext_ack *extack)
3566 {
3567         u16 lag_id;
3568
3569         if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0) {
3570                 NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported LAG devices");
3571                 return false;
3572         }
3573         if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
3574                 NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type");
3575                 return false;
3576         }
3577         return true;
3578 }
3579
3580 static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
3581                                        u16 lag_id, u8 *p_port_index)
3582 {
3583         u64 max_lag_members;
3584         int i;
3585
3586         max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
3587                                              MAX_LAG_MEMBERS);
3588         for (i = 0; i < max_lag_members; i++) {
3589                 if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
3590                         *p_port_index = i;
3591                         return 0;
3592                 }
3593         }
3594         return -EBUSY;
3595 }
3596
/* Make the port join the LAG represented by @lag_dev: resolve (or
 * allocate) a LAG index, create the LAG in HW on first use, add the port
 * as a collector member, record the core-level LAG mapping and move the
 * default-VLAN router interface from the port to the LAG.
 */
static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
                                  struct net_device *lag_dev,
                                  struct netlink_ext_ack *extack)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        struct mlxsw_sp_upper *lag;
        u16 lag_id;
        u8 port_index;
        int err;

        err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
        if (err)
                return err;
        lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
        /* First member: instantiate the LAG in hardware and bind it to
         * the netdev.
         */
        if (!lag->ref_count) {
                err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
                if (err)
                        return err;
                lag->dev = lag_dev;
        }

        err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
        if (err)
                return err;
        err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
        if (err)
                goto err_col_port_add;

        mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
                                   mlxsw_sp_port->local_port);
        mlxsw_sp_port->lag_id = lag_id;
        mlxsw_sp_port->lagged = 1;
        lag->ref_count++;

        /* Port is no longer usable as a router interface */
        if (mlxsw_sp_port->default_vlan->fid)
                mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port->default_vlan);

        /* Join a router interface configured on the LAG, if exists */
        err = mlxsw_sp_port_vlan_router_join(mlxsw_sp_port->default_vlan,
                                             lag_dev, extack);
        if (err)
                goto err_router_join;

        return 0;

/* Unwind in reverse order of setup; the HW LAG itself is only destroyed
 * when this port was its sole (prospective) member.
 */
err_router_join:
        lag->ref_count--;
        mlxsw_sp_port->lagged = 0;
        mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
                                     mlxsw_sp_port->local_port);
        mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
err_col_port_add:
        if (!lag->ref_count)
                mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
        return err;
}
3654
/* Reverse of mlxsw_sp_port_lag_join(): detach the port from its LAG,
 * flush its VLANs, pull the LAG's uppers out of their bridges (on this
 * port's behalf), destroy the HW LAG when this was the last member and
 * restore the default PVID. No-op if the port is not lagged.
 */
static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
                                    struct net_device *lag_dev)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        u16 lag_id = mlxsw_sp_port->lag_id;
        struct mlxsw_sp_upper *lag;

        if (!mlxsw_sp_port->lagged)
                return;
        lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
        WARN_ON(lag->ref_count == 0);

        mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);

        /* Any VLANs configured on the port are no longer valid */
        mlxsw_sp_port_vlan_flush(mlxsw_sp_port, false);
        mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port->default_vlan);
        /* Make the LAG and its directly linked uppers leave bridges they
         * are members of
         */
        mlxsw_sp_port_lag_uppers_cleanup(mlxsw_sp_port, lag_dev);

        /* Last member: remove the LAG from hardware before clearing the
         * refcount below.
         */
        if (lag->ref_count == 1)
                mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);

        mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
                                     mlxsw_sp_port->local_port);
        mlxsw_sp_port->lagged = 0;
        lag->ref_count--;

        /* Make sure untagged frames are allowed to ingress */
        mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID,
                               ETH_P_8021Q);
}
3689
/* Add the port to LAG @lag_id's distributor (egress hashing) via SLDR. */
static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
                                      u16 lag_id)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char sldr_pl[MLXSW_REG_SLDR_LEN];

        mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
                                         mlxsw_sp_port->local_port);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}
3700
/* Remove the port from LAG @lag_id's distributor via SLDR. */
static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
                                         u16 lag_id)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char sldr_pl[MLXSW_REG_SLDR_LEN];

        mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
                                            mlxsw_sp_port->local_port);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}
3711
3712 static int
3713 mlxsw_sp_port_lag_col_dist_enable(struct mlxsw_sp_port *mlxsw_sp_port)
3714 {
3715         int err;
3716
3717         err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port,
3718                                            mlxsw_sp_port->lag_id);
3719         if (err)
3720                 return err;
3721
3722         err = mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id);
3723         if (err)
3724                 goto err_dist_port_add;
3725
3726         return 0;
3727
3728 err_dist_port_add:
3729         mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, mlxsw_sp_port->lag_id);
3730         return err;
3731 }
3732
3733 static int
3734 mlxsw_sp_port_lag_col_dist_disable(struct mlxsw_sp_port *mlxsw_sp_port)
3735 {
3736         int err;
3737
3738         err = mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
3739                                             mlxsw_sp_port->lag_id);
3740         if (err)
3741                 return err;
3742
3743         err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port,
3744                                             mlxsw_sp_port->lag_id);
3745         if (err)
3746                 goto err_col_port_disable;
3747
3748         return 0;
3749
3750 err_col_port_disable:
3751         mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id);
3752         return err;
3753 }
3754
3755 static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
3756                                      struct netdev_lag_lower_state_info *info)
3757 {
3758         if (info->tx_enabled)
3759                 return mlxsw_sp_port_lag_col_dist_enable(mlxsw_sp_port);
3760         else
3761                 return mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
3762 }
3763
/* Set the port's STP state for every possible VLAN at once via a single
 * SPMS register write: FORWARDING when @enable, DISCARDING otherwise.
 * The payload is heap-allocated because SPMS covers all VLAN_N_VID VIDs.
 */
static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
                                 bool enable)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        enum mlxsw_reg_spms_state spms_state;
        char *spms_pl;
        u16 vid;
        int err;

        spms_state = enable ? MLXSW_REG_SPMS_STATE_FORWARDING :
                              MLXSW_REG_SPMS_STATE_DISCARDING;

        spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
        if (!spms_pl)
                return -ENOMEM;
        mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);

        for (vid = 0; vid < VLAN_N_VID; vid++)
                mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);

        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
        kfree(spms_pl);
        return err;
}
3788
3789 static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port)
3790 {
3791         u16 vid = 1;
3792         int err;
3793
3794         err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
3795         if (err)
3796                 return err;
3797         err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true);
3798         if (err)
3799                 goto err_port_stp_set;
3800         err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2,
3801                                      true, false);
3802         if (err)
3803                 goto err_port_vlan_set;
3804
3805         for (; vid <= VLAN_N_VID - 1; vid++) {
3806                 err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
3807                                                      vid, false);
3808                 if (err)
3809                         goto err_vid_learning_set;
3810         }
3811
3812         return 0;
3813
3814 err_vid_learning_set:
3815         for (vid--; vid >= 1; vid--)
3816                 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
3817 err_port_vlan_set:
3818         mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
3819 err_port_stp_set:
3820         mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
3821         return err;
3822 }
3823
3824 static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port)
3825 {
3826         u16 vid;
3827
3828         for (vid = VLAN_N_VID - 1; vid >= 1; vid--)
3829                 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
3830                                                vid, true);
3831
3832         mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2,
3833                                false, false);
3834         mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
3835         mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
3836 }
3837
3838 static bool mlxsw_sp_bridge_has_multiple_vxlans(struct net_device *br_dev)
3839 {
3840         unsigned int num_vxlans = 0;
3841         struct net_device *dev;
3842         struct list_head *iter;
3843
3844         netdev_for_each_lower_dev(br_dev, dev, iter) {
3845                 if (netif_is_vxlan(dev))
3846                         num_vxlans++;
3847         }
3848
3849         return num_vxlans > 1;
3850 }
3851
3852 static bool mlxsw_sp_bridge_vxlan_vlan_is_valid(struct net_device *br_dev)
3853 {
3854         DECLARE_BITMAP(vlans, VLAN_N_VID) = {0};
3855         struct net_device *dev;
3856         struct list_head *iter;
3857
3858         netdev_for_each_lower_dev(br_dev, dev, iter) {
3859                 u16 pvid;
3860                 int err;
3861
3862                 if (!netif_is_vxlan(dev))
3863                         continue;
3864
3865                 err = mlxsw_sp_vxlan_mapped_vid(dev, &pvid);
3866                 if (err || !pvid)
3867                         continue;
3868
3869                 if (test_and_set_bit(pvid, vlans))
3870                         return false;
3871         }
3872
3873         return true;
3874 }
3875
3876 static bool mlxsw_sp_bridge_vxlan_is_valid(struct net_device *br_dev,
3877                                            struct netlink_ext_ack *extack)
3878 {
3879         if (br_multicast_enabled(br_dev)) {
3880                 NL_SET_ERR_MSG_MOD(extack, "Multicast can not be enabled on a bridge with a VxLAN device");
3881                 return false;
3882         }
3883
3884         if (!br_vlan_enabled(br_dev) &&
3885             mlxsw_sp_bridge_has_multiple_vxlans(br_dev)) {
3886                 NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices are not supported in a VLAN-unaware bridge");
3887                 return false;
3888         }
3889
3890         if (br_vlan_enabled(br_dev) &&
3891             !mlxsw_sp_bridge_vxlan_vlan_is_valid(br_dev)) {
3892                 NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices cannot have the same VLAN as PVID and egress untagged");
3893                 return false;
3894         }
3895
3896         return true;
3897 }
3898
/* Handle [PRE]CHANGEUPPER notifications for a mlxsw port netdev.
 *
 * NETDEV_PRECHANGEUPPER: veto unsupported topologies before the core
 * commits the linking, setting an extack message on error.
 * NETDEV_CHANGEUPPER: reflect the already-committed change into the
 * device (bridge/LAG/OVS join or leave).
 *
 * @lower_dev: the netdev the notification was originally sent for (the
 *             port itself, or an upper such as a LAG seen by callers).
 * @dev: the mlxsw port netdev.
 * Returns 0 or a negative errno.
 */
static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
					       struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;
	u16 proto;

	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	info = ptr;
	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		/* Only VLAN, LAG, bridge, OVS and macvlan uppers are
		 * supported on top of a port.
		 */
		if (!is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(upper_dev) &&
		    !netif_is_bridge_master(upper_dev) &&
		    !netif_is_ovs_master(upper_dev) &&
		    !netif_is_macvlan(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EINVAL;
		}
		/* Unlinking needs no validation. */
		if (!info->linking)
			break;
		/* Joining a bridge that holds VxLAN device(s): validate
		 * the bridge's VxLAN configuration first.
		 */
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) &&
		    mlxsw_sp_bridge_has_vxlan(upper_dev) &&
		    !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		if (netdev_has_any_upper_dev(upper_dev) &&
		    (!netif_is_bridge_master(upper_dev) ||
		     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
							  upper_dev))) {
			NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported");
			return -EINVAL;
		}
		if (netif_is_lag_master(upper_dev) &&
		    !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
					       info->upper_info, extack))
			return -EINVAL;
		if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Master device is a LAG master and this device has a VLAN");
			return -EINVAL;
		}
		if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(vlan_dev_real_dev(upper_dev))) {
			NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on a LAG port");
			return -EINVAL;
		}
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_exists(mlxsw_sp, lower_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Master device is an OVS master and this device has a VLAN");
			return -EINVAL;
		}
		if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on an OVS port");
			return -EINVAL;
		}
		/* VLAN-aware bridges are only supported with 802.1q or
		 * 802.1ad VLAN protocol, and a port with VLAN uppers
		 * cannot join an 802.1ad bridge.
		 */
		if (netif_is_bridge_master(upper_dev)) {
			br_vlan_get_proto(upper_dev, &proto);
			if (br_vlan_enabled(upper_dev) &&
			    proto != ETH_P_8021Q && proto != ETH_P_8021AD) {
				NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a bridge with unknown VLAN protocol is not supported");
				return -EOPNOTSUPP;
			}
			if (vlan_uses_dev(lower_dev) &&
			    br_vlan_enabled(upper_dev) &&
			    proto == ETH_P_8021AD) {
				NL_SET_ERR_MSG_MOD(extack, "Enslaving a port that already has a VLAN upper to an 802.1ad bridge is not supported");
				return -EOPNOTSUPP;
			}
		}
		if (netif_is_bridge_port(lower_dev) && is_vlan_dev(upper_dev)) {
			struct net_device *br_dev = netdev_master_upper_dev_get(lower_dev);

			if (br_vlan_enabled(br_dev)) {
				br_vlan_get_proto(br_dev, &proto);
				if (proto == ETH_P_8021AD) {
					NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are not supported on a port enslaved to an 802.1ad bridge");
					return -EOPNOTSUPP;
				}
			}
		}
		if (is_vlan_dev(upper_dev) &&
		    ntohs(vlan_dev_vlan_proto(upper_dev)) != ETH_P_8021Q) {
			NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are only supported with 802.1q VLAN protocol");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								lower_dev,
								upper_dev,
								extack);
			else
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
							   lower_dev,
							   upper_dev);
		} else if (netif_is_lag_master(upper_dev)) {
			if (info->linking) {
				err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
							     upper_dev, extack);
			} else {
				/* Stop collection/distribution before
				 * leaving the LAG.
				 */
				mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
				mlxsw_sp_port_lag_leave(mlxsw_sp_port,
							upper_dev);
			}
		} else if (netif_is_ovs_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_ovs_join(mlxsw_sp_port);
			else
				mlxsw_sp_port_ovs_leave(mlxsw_sp_port);
		} else if (netif_is_macvlan(upper_dev)) {
			if (!info->linking)
				mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		} else if (is_vlan_dev(upper_dev)) {
			struct net_device *br_dev;

			/* Only a VLAN upper being unlinked from a bridge
			 * requires action here.
			 */
			if (!netif_is_bridge_port(upper_dev))
				break;
			if (info->linking)
				break;
			br_dev = netdev_master_upper_dev_get(upper_dev);
			mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev,
						   br_dev);
		}
		break;
	}

	return err;
}
4043
4044 static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
4045                                                unsigned long event, void *ptr)
4046 {
4047         struct netdev_notifier_changelowerstate_info *info;
4048         struct mlxsw_sp_port *mlxsw_sp_port;
4049         int err;
4050
4051         mlxsw_sp_port = netdev_priv(dev);
4052         info = ptr;
4053
4054         switch (event) {
4055         case NETDEV_CHANGELOWERSTATE:
4056                 if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
4057                         err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
4058                                                         info->lower_state_info);
4059                         if (err)
4060                                 netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
4061                 }
4062                 break;
4063         }
4064
4065         return 0;
4066 }
4067
4068 static int mlxsw_sp_netdevice_port_event(struct net_device *lower_dev,
4069                                          struct net_device *port_dev,
4070                                          unsigned long event, void *ptr)
4071 {
4072         switch (event) {
4073         case NETDEV_PRECHANGEUPPER:
4074         case NETDEV_CHANGEUPPER:
4075                 return mlxsw_sp_netdevice_port_upper_event(lower_dev, port_dev,
4076                                                            event, ptr);
4077         case NETDEV_CHANGELOWERSTATE:
4078                 return mlxsw_sp_netdevice_port_lower_event(port_dev, event,
4079                                                            ptr);
4080         }
4081
4082         return 0;
4083 }
4084
4085 static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
4086                                         unsigned long event, void *ptr)
4087 {
4088         struct net_device *dev;
4089         struct list_head *iter;
4090         int ret;
4091
4092         netdev_for_each_lower_dev(lag_dev, dev, iter) {
4093                 if (mlxsw_sp_port_dev_check(dev)) {
4094                         ret = mlxsw_sp_netdevice_port_event(lag_dev, dev, event,
4095                                                             ptr);
4096                         if (ret)
4097                                 return ret;
4098                 }
4099         }
4100
4101         return 0;
4102 }
4103
/* Handle [PRE]CHANGEUPPER notifications for a VLAN device whose real
 * device is a mlxsw port (@dev; when the real device is a LAG, callers
 * invoke this once per member port).
 * Only bridge and macvlan uppers are supported on top of such a VLAN
 * device. Returns 0 or a negative errno with an extack message on veto.
 */
static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
					      struct net_device *dev,
					      unsigned long event, void *ptr,
					      u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;
	int err = 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!netif_is_bridge_master(upper_dev) &&
		    !netif_is_macvlan(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EINVAL;
		}
		/* Unlinking needs no validation. */
		if (!info->linking)
			break;
		/* Joining a bridge that holds VxLAN device(s): validate
		 * the bridge's VxLAN configuration first.
		 */
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) &&
		    mlxsw_sp_bridge_has_vxlan(upper_dev) &&
		    !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		if (netdev_has_any_upper_dev(upper_dev) &&
		    (!netif_is_bridge_master(upper_dev) ||
		     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
							  upper_dev))) {
			NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported");
			return -EINVAL;
		}
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_exists(mlxsw_sp, vlan_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								vlan_dev,
								upper_dev,
								extack);
			else
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
							   vlan_dev,
							   upper_dev);
		} else if (netif_is_macvlan(upper_dev)) {
			if (!info->linking)
				mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		} else {
			/* Should have been vetoed in PRECHANGEUPPER. */
			err = -EINVAL;
			WARN_ON(1);
		}
		break;
	}

	return err;
}
4170
4171 static int mlxsw_sp_netdevice_lag_port_vlan_event(struct net_device *vlan_dev,
4172                                                   struct net_device *lag_dev,
4173                                                   unsigned long event,
4174                                                   void *ptr, u16 vid)
4175 {
4176         struct net_device *dev;
4177         struct list_head *iter;
4178         int ret;
4179
4180         netdev_for_each_lower_dev(lag_dev, dev, iter) {
4181                 if (mlxsw_sp_port_dev_check(dev)) {
4182                         ret = mlxsw_sp_netdevice_port_vlan_event(vlan_dev, dev,
4183                                                                  event, ptr,
4184                                                                  vid);
4185                         if (ret)
4186                                 return ret;
4187                 }
4188         }
4189
4190         return 0;
4191 }
4192
4193 static int mlxsw_sp_netdevice_bridge_vlan_event(struct net_device *vlan_dev,
4194                                                 struct net_device *br_dev,
4195                                                 unsigned long event, void *ptr,
4196                                                 u16 vid)
4197 {
4198         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev);
4199         struct netdev_notifier_changeupper_info *info = ptr;
4200         struct netlink_ext_ack *extack;
4201         struct net_device *upper_dev;
4202
4203         if (!mlxsw_sp)
4204                 return 0;
4205
4206         extack = netdev_notifier_info_to_extack(&info->info);
4207
4208         switch (event) {
4209         case NETDEV_PRECHANGEUPPER:
4210                 upper_dev = info->upper_dev;
4211                 if (!netif_is_macvlan(upper_dev)) {
4212                         NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
4213                         return -EOPNOTSUPP;
4214                 }
4215                 if (!info->linking)
4216                         break;
4217                 if (netif_is_macvlan(upper_dev) &&
4218                     !mlxsw_sp_rif_exists(mlxsw_sp, vlan_dev)) {
4219                         NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
4220                         return -EOPNOTSUPP;
4221                 }
4222                 break;
4223         case NETDEV_CHANGEUPPER:
4224                 upper_dev = info->upper_dev;
4225                 if (info->linking)
4226                         break;
4227                 if (netif_is_macvlan(upper_dev))
4228                         mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
4229                 break;
4230         }
4231
4232         return 0;
4233 }
4234
4235 static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
4236                                          unsigned long event, void *ptr)
4237 {
4238         struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
4239         u16 vid = vlan_dev_vlan_id(vlan_dev);
4240
4241         if (mlxsw_sp_port_dev_check(real_dev))
4242                 return mlxsw_sp_netdevice_port_vlan_event(vlan_dev, real_dev,
4243                                                           event, ptr, vid);
4244         else if (netif_is_lag_master(real_dev))
4245                 return mlxsw_sp_netdevice_lag_port_vlan_event(vlan_dev,
4246                                                               real_dev, event,
4247                                                               ptr, vid);
4248         else if (netif_is_bridge_master(real_dev))
4249                 return mlxsw_sp_netdevice_bridge_vlan_event(vlan_dev, real_dev,
4250                                                             event, ptr, vid);
4251
4252         return 0;
4253 }
4254
/* Handle [PRE]CHANGEUPPER notifications for a bridge device offloaded
 * by this driver. Only VLAN and macvlan uppers are supported (and none
 * at all on an 802.1ad bridge); on unlinking, the associated router
 * interface state is torn down.
 * Returns 0 or -EOPNOTSUPP with an extack message.
 */
static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
					   unsigned long event, void *ptr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(br_dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;
	u16 proto;

	/* Nothing to do if the bridge is not offloaded by this driver. */
	if (!mlxsw_sp)
		return 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!is_vlan_dev(upper_dev) && !netif_is_macvlan(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EOPNOTSUPP;
		}
		/* Unlinking needs no validation. */
		if (!info->linking)
			break;
		if (br_vlan_enabled(br_dev)) {
			br_vlan_get_proto(br_dev, &proto);
			if (proto == ETH_P_8021AD) {
				NL_SET_ERR_MSG_MOD(extack, "Uppers are not supported on top of an 802.1ad bridge");
				return -EOPNOTSUPP;
			}
		}
		if (is_vlan_dev(upper_dev) &&
		    ntohs(vlan_dev_vlan_proto(upper_dev)) != ETH_P_8021Q) {
			NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are only supported with 802.1q VLAN protocol");
			return -EOPNOTSUPP;
		}
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_exists(mlxsw_sp, br_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (info->linking)
			break;
		/* Clean up router state for the removed upper. */
		if (is_vlan_dev(upper_dev))
			mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, upper_dev);
		if (netif_is_macvlan(upper_dev))
			mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		break;
	}

	return 0;
}
4309
4310 static int mlxsw_sp_netdevice_macvlan_event(struct net_device *macvlan_dev,
4311                                             unsigned long event, void *ptr)
4312 {
4313         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(macvlan_dev);
4314         struct netdev_notifier_changeupper_info *info = ptr;
4315         struct netlink_ext_ack *extack;
4316
4317         if (!mlxsw_sp || event != NETDEV_PRECHANGEUPPER)
4318                 return 0;
4319
4320         extack = netdev_notifier_info_to_extack(&info->info);
4321
4322         /* VRF enslavement is handled in mlxsw_sp_netdevice_vrf_event() */
4323         NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
4324
4325         return -EOPNOTSUPP;
4326 }
4327
4328 static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr)
4329 {
4330         struct netdev_notifier_changeupper_info *info = ptr;
4331
4332         if (event != NETDEV_PRECHANGEUPPER && event != NETDEV_CHANGEUPPER)
4333                 return false;
4334         return netif_is_l3_master(info->upper_dev);
4335 }
4336
/* Handle netdevice events for a VxLAN device: join/leave the hardware
 * bridge model when the device is linked/unlinked under an offloaded
 * bridge (CHANGEUPPER), or when it is brought up/down while already
 * enslaved (PRE_UP/DOWN).
 * Returns 0 or a negative errno (veto on PRE-type events).
 */
static int mlxsw_sp_netdevice_vxlan_event(struct mlxsw_sp *mlxsw_sp,
					  struct net_device *dev,
					  unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *cu_info;
	struct netdev_notifier_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;

	extack = netdev_notifier_info_to_extack(info);

	switch (event) {
	case NETDEV_CHANGEUPPER:
		cu_info = container_of(info,
				       struct netdev_notifier_changeupper_info,
				       info);
		upper_dev = cu_info->upper_dev;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		/* Ignore bridges not offloaded by this driver. */
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		if (!mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		if (cu_info->linking) {
			/* Joining is deferred until the device is UP (see
			 * NETDEV_PRE_UP below).
			 */
			if (!netif_running(dev))
				return 0;
			/* When the bridge is VLAN-aware, the VNI of the VxLAN
			 * device needs to be mapped to a VLAN, but at this
			 * point no VLANs are configured on the VxLAN device
			 */
			if (br_vlan_enabled(upper_dev))
				return 0;
			return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev,
							  dev, 0, extack);
		} else {
			/* VLANs were already flushed, which triggered the
			 * necessary cleanup
			 */
			if (br_vlan_enabled(upper_dev))
				return 0;
			mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
		}
		break;
	case NETDEV_PRE_UP:
		/* Device coming up while already enslaved to an offloaded
		 * bridge: join now (may veto the UP).
		 */
		upper_dev = netdev_master_upper_dev_get(dev);
		if (!upper_dev)
			return 0;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev, dev, 0,
						  extack);
	case NETDEV_DOWN:
		/* Device going down while enslaved: leave the hardware
		 * bridge model.
		 */
		upper_dev = netdev_master_upper_dev_get(dev);
		if (!upper_dev)
			return 0;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
		break;
	}

	return 0;
}
4404
/* Main netdevice notifier callback for a Spectrum instance. Refreshes
 * SPAN (mirroring) state and dispatches the event to the handler
 * matching the device type.
 * Returns a notifier return value derived from the handler's errno.
 */
static int mlxsw_sp_netdevice_event(struct notifier_block *nb,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct mlxsw_sp_span_entry *span_entry;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;

	mlxsw_sp = container_of(nb, struct mlxsw_sp, netdevice_nb);
	/* Invalidate any SPAN entry whose target netdev is going away,
	 * then re-resolve all SPAN entries.
	 */
	if (event == NETDEV_UNREGISTER) {
		span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, dev);
		if (span_entry)
			mlxsw_sp_span_entry_invalidate(mlxsw_sp, span_entry);
	}
	mlxsw_sp_span_respin(mlxsw_sp);

	/* Note: a separate 'if', not part of the chain below — a VxLAN
	 * device is additionally matched against the chain.
	 */
	if (netif_is_vxlan(dev))
		err = mlxsw_sp_netdevice_vxlan_event(mlxsw_sp, dev, event, ptr);
	if (mlxsw_sp_netdev_is_ipip_ol(mlxsw_sp, dev))
		err = mlxsw_sp_netdevice_ipip_ol_event(mlxsw_sp, dev,
						       event, ptr);
	else if (mlxsw_sp_netdev_is_ipip_ul(mlxsw_sp, dev))
		err = mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, dev,
						       event, ptr);
	else if (event == NETDEV_PRE_CHANGEADDR ||
		 event == NETDEV_CHANGEADDR ||
		 event == NETDEV_CHANGEMTU)
		err = mlxsw_sp_netdevice_router_port_event(dev, event, ptr);
	else if (mlxsw_sp_is_vrf_event(event, ptr))
		err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr);
	else if (mlxsw_sp_port_dev_check(dev))
		err = mlxsw_sp_netdevice_port_event(dev, dev, event, ptr);
	else if (netif_is_lag_master(dev))
		err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
	else if (is_vlan_dev(dev))
		err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);
	else if (netif_is_bridge_master(dev))
		err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr);
	else if (netif_is_macvlan(dev))
		err = mlxsw_sp_netdevice_macvlan_event(dev, event, ptr);

	return notifier_from_errno(err);
}
4448
/* Validators for IPv4/IPv6 address addition on mlxsw-related netdevs;
 * registered in mlxsw_sp_module_init().
 */
static struct notifier_block mlxsw_sp_inetaddr_valid_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inetaddr_valid_event,
};

static struct notifier_block mlxsw_sp_inet6addr_valid_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inet6addr_valid_event,
};
4456
/* PCI binding for the Spectrum-1 ASIC. */
static const struct pci_device_id mlxsw_sp1_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0},
	{0, },
};

static struct pci_driver mlxsw_sp1_pci_driver = {
	.name = mlxsw_sp1_driver_name,
	.id_table = mlxsw_sp1_pci_id_table,
};
4466
4467 static const struct pci_device_id mlxsw_sp2_pci_id_table[] = {
4468         {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM2), 0},
4469         {0, },
4470 };
4471
4472 static struct pci_driver mlxsw_sp2_pci_driver = {
4473         .name = mlxsw_sp2_driver_name,
4474         .id_table = mlxsw_sp2_pci_id_table,
4475 };
4476
4477 static const struct pci_device_id mlxsw_sp3_pci_id_table[] = {
4478         {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM3), 0},
4479         {0, },
4480 };
4481
4482 static struct pci_driver mlxsw_sp3_pci_driver = {
4483         .name = mlxsw_sp3_driver_name,
4484         .id_table = mlxsw_sp3_pci_id_table,
4485 };
4486
4487 static int __init mlxsw_sp_module_init(void)
4488 {
4489         int err;
4490
4491         register_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
4492         register_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
4493
4494         err = mlxsw_core_driver_register(&mlxsw_sp1_driver);
4495         if (err)
4496                 goto err_sp1_core_driver_register;
4497
4498         err = mlxsw_core_driver_register(&mlxsw_sp2_driver);
4499         if (err)
4500                 goto err_sp2_core_driver_register;
4501
4502         err = mlxsw_core_driver_register(&mlxsw_sp3_driver);
4503         if (err)
4504                 goto err_sp3_core_driver_register;
4505
4506         err = mlxsw_pci_driver_register(&mlxsw_sp1_pci_driver);
4507         if (err)
4508                 goto err_sp1_pci_driver_register;
4509
4510         err = mlxsw_pci_driver_register(&mlxsw_sp2_pci_driver);
4511         if (err)
4512                 goto err_sp2_pci_driver_register;
4513
4514         err = mlxsw_pci_driver_register(&mlxsw_sp3_pci_driver);
4515         if (err)
4516                 goto err_sp3_pci_driver_register;
4517
4518         return 0;
4519
4520 err_sp3_pci_driver_register:
4521         mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
4522 err_sp2_pci_driver_register:
4523         mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
4524 err_sp1_pci_driver_register:
4525         mlxsw_core_driver_unregister(&mlxsw_sp3_driver);
4526 err_sp3_core_driver_register:
4527         mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
4528 err_sp2_core_driver_register:
4529         mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
4530 err_sp1_core_driver_register:
4531         unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
4532         unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
4533         return err;
4534 }
4535
/* Module teardown: undo mlxsw_sp_module_init() in exact reverse order --
 * PCI drivers, then core drivers, then the address validator notifiers.
 * The sequence below is order-sensitive and must mirror init.
 */
static void __exit mlxsw_sp_module_exit(void)
{
	mlxsw_pci_driver_unregister(&mlxsw_sp3_pci_driver);
	mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
	mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp3_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
	unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
	unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
}
4547
module_init(mlxsw_sp_module_init);
module_exit(mlxsw_sp_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
/* Export all three PCI ID tables so module autoloading (udev/modalias)
 * works for every supported Spectrum generation.
 */
MODULE_DEVICE_TABLE(pci, mlxsw_sp1_pci_id_table);
MODULE_DEVICE_TABLE(pci, mlxsw_sp2_pci_id_table);
MODULE_DEVICE_TABLE(pci, mlxsw_sp3_pci_id_table);
/* Declare the firmware images the driver may request, so packaging
 * tools can bundle them (e.g. into an initramfs) alongside the module.
 */
MODULE_FIRMWARE(MLXSW_SP1_FW_FILENAME);
MODULE_FIRMWARE(MLXSW_SP2_FW_FILENAME);
MODULE_FIRMWARE(MLXSW_SP3_FW_FILENAME);