1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
3
4 #include <linux/kernel.h>
5 #include <linux/module.h>
6 #include <linux/types.h>
7 #include <linux/pci.h>
8 #include <linux/netdevice.h>
9 #include <linux/etherdevice.h>
10 #include <linux/ethtool.h>
11 #include <linux/slab.h>
12 #include <linux/device.h>
13 #include <linux/skbuff.h>
14 #include <linux/if_vlan.h>
15 #include <linux/if_bridge.h>
16 #include <linux/workqueue.h>
17 #include <linux/jiffies.h>
18 #include <linux/bitops.h>
19 #include <linux/list.h>
20 #include <linux/notifier.h>
21 #include <linux/dcbnl.h>
22 #include <linux/inetdevice.h>
23 #include <linux/netlink.h>
24 #include <linux/jhash.h>
25 #include <net/switchdev.h>
26 #include <net/pkt_cls.h>
27 #include <net/tc_act/tc_mirred.h>
28 #include <net/netevent.h>
29 #include <net/tc_act/tc_sample.h>
30 #include <net/addrconf.h>
31
32 #include "spectrum.h"
33 #include "pci.h"
34 #include "core.h"
35 #include "core_env.h"
36 #include "reg.h"
37 #include "port.h"
38 #include "trap.h"
39 #include "txheader.h"
40 #include "spectrum_cnt.h"
41 #include "spectrum_dpipe.h"
42 #include "spectrum_acl_flex_actions.h"
43 #include "spectrum_span.h"
44 #include "spectrum_ptp.h"
45 #include "../mlxfw/mlxfw.h"
46
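/* Firmware releases are grouped into "branches" of one hundred minor
 * versions, e.g. minor 2000 maps to branch 20. The branch is what
 * mlxsw_sp_fw_rev_validate() compares below.
 */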
47 #define MLXSW_SP_FWREV_MINOR_TO_BRANCH(minor) ((minor) / 100)
48
49 #define MLXSW_SP1_FWREV_MAJOR 13
50 #define MLXSW_SP1_FWREV_MINOR 2000
51 #define MLXSW_SP1_FWREV_SUBMINOR 1122
52 #define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702
53
54 static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
55         .major = MLXSW_SP1_FWREV_MAJOR,
56         .minor = MLXSW_SP1_FWREV_MINOR,
57         .subminor = MLXSW_SP1_FWREV_SUBMINOR,
58         .can_reset_minor = MLXSW_SP1_FWREV_CAN_RESET_MINOR,
59 };
60
61 #define MLXSW_SP1_FW_FILENAME \
62         "mellanox/mlxsw_spectrum-" __stringify(MLXSW_SP1_FWREV_MAJOR) \
63         "." __stringify(MLXSW_SP1_FWREV_MINOR) \
64         "." __stringify(MLXSW_SP1_FWREV_SUBMINOR) ".mfa2"
65
66 static const char mlxsw_sp1_driver_name[] = "mlxsw_spectrum";
67 static const char mlxsw_sp2_driver_name[] = "mlxsw_spectrum2";
68 static const char mlxsw_sp_driver_version[] = "1.0";
69
70 static const unsigned char mlxsw_sp1_mac_mask[ETH_ALEN] = {
71         0xff, 0xff, 0xff, 0xff, 0xfc, 0x00
72 };
73 static const unsigned char mlxsw_sp2_mac_mask[ETH_ALEN] = {
74         0xff, 0xff, 0xff, 0xff, 0xf0, 0x00
75 };
76
77 /* tx_hdr_version
78  * Tx header version.
79  * Must be set to 1.
80  */
81 MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);
82
83 /* tx_hdr_ctl
84  * Packet control type.
85  * 0 - Ethernet control (e.g. EMADs, LACP)
86  * 1 - Ethernet data
87  */
88 MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);
89
90 /* tx_hdr_proto
91  * Packet protocol type. Must be set to 1 (Ethernet).
92  */
93 MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);
94
95 /* tx_hdr_rx_is_router
96  * Packet is sent from the router. Valid for data packets only.
97  */
98 MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);
99
100 /* tx_hdr_fid_valid
101  * Indicates if the 'fid' field is valid and should be used for
102  * forwarding lookup. Valid for data packets only.
103  */
104 MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);
105
106 /* tx_hdr_swid
107  * Switch partition ID. Must be set to 0.
108  */
109 MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);
110
111 /* tx_hdr_control_tclass
112  * Indicates if the packet should use the control TClass and not one
113  * of the data TClasses.
114  */
115 MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);
116
117 /* tx_hdr_etclass
118  * Egress TClass to be used on the egress port of the egress device.
119  */
120 MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);
121
122 /* tx_hdr_port_mid
123  * Destination local port for unicast packets.
124  * Destination multicast ID for multicast packets.
125  *
126  * Control packets are directed to a specific egress port, while data
127  * packets are transmitted through the CPU port (0) into the switch partition,
128  * where forwarding rules are applied.
129  */
130 MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);
131
132 /* tx_hdr_fid
133  * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
134  * set, otherwise calculated based on the packet's VID using VID to FID mapping.
135  * Valid for data packets only.
136  */
137 MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);
138
139 /* tx_hdr_type
140  * 0 - Data packets
141  * 6 - Control packets
142  */
143 MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
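
/* The items above describe the MLXSW_TXHDR_LEN-byte Tx header that
 * mlxsw_sp_txhdr_construct() prepends to packets sent from the CPU.
 */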
144
145 struct mlxsw_sp_mlxfw_dev {
146         struct mlxfw_dev mlxfw_dev;
147         struct mlxsw_sp *mlxsw_sp;
148 };
149
150 struct mlxsw_sp_ptp_ops {
151         struct mlxsw_sp_ptp_clock *
152                 (*clock_init)(struct mlxsw_sp *mlxsw_sp, struct device *dev);
153         void (*clock_fini)(struct mlxsw_sp_ptp_clock *clock);
154
155         struct mlxsw_sp_ptp_state *(*init)(struct mlxsw_sp *mlxsw_sp);
156         void (*fini)(struct mlxsw_sp_ptp_state *ptp_state);
157
158         /* Notify a driver that a packet that might be PTP was received. Driver
159          * is responsible for freeing the passed-in SKB.
160          */
161         void (*receive)(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
162                         u8 local_port);
163
164         /* Notify a driver that a timestamped packet was transmitted. Driver
165          * is responsible for freeing the passed-in SKB.
166          */
167         void (*transmitted)(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
168                             u8 local_port);
169
170         int (*hwtstamp_get)(struct mlxsw_sp_port *mlxsw_sp_port,
171                             struct hwtstamp_config *config);
172         int (*hwtstamp_set)(struct mlxsw_sp_port *mlxsw_sp_port,
173                             struct hwtstamp_config *config);
174         int (*get_ts_info)(struct mlxsw_sp *mlxsw_sp,
175                            struct ethtool_ts_info *info);
176 };
177
178 static int mlxsw_sp_component_query(struct mlxfw_dev *mlxfw_dev,
179                                     u16 component_index, u32 *p_max_size,
180                                     u8 *p_align_bits, u16 *p_max_write_size)
181 {
182         struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
183                 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
184         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
185         char mcqi_pl[MLXSW_REG_MCQI_LEN];
186         int err;
187
188         mlxsw_reg_mcqi_pack(mcqi_pl, component_index);
189         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcqi), mcqi_pl);
190         if (err)
191                 return err;
192         mlxsw_reg_mcqi_unpack(mcqi_pl, p_max_size, p_align_bits,
193                               p_max_write_size);
194
195         *p_align_bits = max_t(u8, *p_align_bits, 2);
196         *p_max_write_size = min_t(u16, *p_max_write_size,
197                                   MLXSW_REG_MCDA_MAX_DATA_LEN);
198         return 0;
199 }
200
201 static int mlxsw_sp_fsm_lock(struct mlxfw_dev *mlxfw_dev, u32 *fwhandle)
202 {
203         struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
204                 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
205         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
206         char mcc_pl[MLXSW_REG_MCC_LEN];
207         u8 control_state;
208         int err;
209
210         mlxsw_reg_mcc_pack(mcc_pl, 0, 0, 0, 0);
211         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
212         if (err)
213                 return err;
214
215         mlxsw_reg_mcc_unpack(mcc_pl, fwhandle, NULL, &control_state);
216         if (control_state != MLXFW_FSM_STATE_IDLE)
217                 return -EBUSY;
218
219         mlxsw_reg_mcc_pack(mcc_pl,
220                            MLXSW_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE,
221                            0, *fwhandle, 0);
222         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
223 }
224
225 static int mlxsw_sp_fsm_component_update(struct mlxfw_dev *mlxfw_dev,
226                                          u32 fwhandle, u16 component_index,
227                                          u32 component_size)
228 {
229         struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
230                 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
231         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
232         char mcc_pl[MLXSW_REG_MCC_LEN];
233
234         mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_UPDATE_COMPONENT,
235                            component_index, fwhandle, component_size);
236         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
237 }
238
239 static int mlxsw_sp_fsm_block_download(struct mlxfw_dev *mlxfw_dev,
240                                        u32 fwhandle, u8 *data, u16 size,
241                                        u32 offset)
242 {
243         struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
244                 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
245         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
246         char mcda_pl[MLXSW_REG_MCDA_LEN];
247
248         mlxsw_reg_mcda_pack(mcda_pl, fwhandle, offset, size, data);
249         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcda), mcda_pl);
250 }
251
252 static int mlxsw_sp_fsm_component_verify(struct mlxfw_dev *mlxfw_dev,
253                                          u32 fwhandle, u16 component_index)
254 {
255         struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
256                 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
257         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
258         char mcc_pl[MLXSW_REG_MCC_LEN];
259
260         mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_VERIFY_COMPONENT,
261                            component_index, fwhandle, 0);
262         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
263 }
264
265 static int mlxsw_sp_fsm_activate(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
266 {
267         struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
268                 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
269         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
270         char mcc_pl[MLXSW_REG_MCC_LEN];
271
272         mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_ACTIVATE, 0,
273                            fwhandle, 0);
274         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
275 }
276
277 static int mlxsw_sp_fsm_query_state(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
278                                     enum mlxfw_fsm_state *fsm_state,
279                                     enum mlxfw_fsm_state_err *fsm_state_err)
280 {
281         struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
282                 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
283         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
284         char mcc_pl[MLXSW_REG_MCC_LEN];
285         u8 control_state;
286         u8 error_code;
287         int err;
288
289         mlxsw_reg_mcc_pack(mcc_pl, 0, 0, fwhandle, 0);
290         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
291         if (err)
292                 return err;
293
294         mlxsw_reg_mcc_unpack(mcc_pl, NULL, &error_code, &control_state);
295         *fsm_state = control_state;
296         *fsm_state_err = min_t(enum mlxfw_fsm_state_err, error_code,
297                                MLXFW_FSM_STATE_ERR_MAX);
298         return 0;
299 }
300
301 static void mlxsw_sp_fsm_cancel(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
302 {
303         struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
304                 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
305         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
306         char mcc_pl[MLXSW_REG_MCC_LEN];
307
308         mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_CANCEL, 0,
309                            fwhandle, 0);
310         mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
311 }
312
313 static void mlxsw_sp_fsm_release(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
314 {
315         struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
316                 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
317         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
318         char mcc_pl[MLXSW_REG_MCC_LEN];
319
320         mlxsw_reg_mcc_pack(mcc_pl,
321                            MLXSW_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE, 0,
322                            fwhandle, 0);
323         mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
324 }
325
326 static void mlxsw_sp_status_notify(struct mlxfw_dev *mlxfw_dev,
327                                    const char *msg, const char *comp_name,
328                                    u32 done_bytes, u32 total_bytes)
329 {
330         struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
331                 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
332         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
333
334         devlink_flash_update_status_notify(priv_to_devlink(mlxsw_sp->core),
335                                            msg, comp_name,
336                                            done_bytes, total_bytes);
337 }
338
339 static const struct mlxfw_dev_ops mlxsw_sp_mlxfw_dev_ops = {
340         .component_query        = mlxsw_sp_component_query,
341         .fsm_lock               = mlxsw_sp_fsm_lock,
342         .fsm_component_update   = mlxsw_sp_fsm_component_update,
343         .fsm_block_download     = mlxsw_sp_fsm_block_download,
344         .fsm_component_verify   = mlxsw_sp_fsm_component_verify,
345         .fsm_activate           = mlxsw_sp_fsm_activate,
346         .fsm_query_state        = mlxsw_sp_fsm_query_state,
347         .fsm_cancel             = mlxsw_sp_fsm_cancel,
348         .fsm_release            = mlxsw_sp_fsm_release,
349         .status_notify          = mlxsw_sp_status_notify,
350 };
351
352 static int mlxsw_sp_firmware_flash(struct mlxsw_sp *mlxsw_sp,
353                                    const struct firmware *firmware,
354                                    struct netlink_ext_ack *extack)
355 {
356         struct mlxsw_sp_mlxfw_dev mlxsw_sp_mlxfw_dev = {
357                 .mlxfw_dev = {
358                         .ops = &mlxsw_sp_mlxfw_dev_ops,
359                         .psid = mlxsw_sp->bus_info->psid,
360                         .psid_size = strlen(mlxsw_sp->bus_info->psid),
361                 },
362                 .mlxsw_sp = mlxsw_sp
363         };
364         int err;
365
366         mlxsw_core_fw_flash_start(mlxsw_sp->core);
367         devlink_flash_update_begin_notify(priv_to_devlink(mlxsw_sp->core));
368         err = mlxfw_firmware_flash(&mlxsw_sp_mlxfw_dev.mlxfw_dev,
369                                    firmware, extack);
370         devlink_flash_update_end_notify(priv_to_devlink(mlxsw_sp->core));
371         mlxsw_core_fw_flash_end(mlxsw_sp->core);
372
373         return err;
374 }
375
376 static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp)
377 {
378         const struct mlxsw_fw_rev *rev = &mlxsw_sp->bus_info->fw_rev;
379         const struct mlxsw_fw_rev *req_rev = mlxsw_sp->req_rev;
380         const char *fw_filename = mlxsw_sp->fw_filename;
381         union devlink_param_value value;
382         const struct firmware *firmware;
383         int err;
384
385         /* Don't check if driver does not require it */
386         if (!req_rev || !fw_filename)
387                 return 0;
388
389         /* Don't check if devlink 'fw_load_policy' param is 'flash' */
390         err = devlink_param_driverinit_value_get(priv_to_devlink(mlxsw_sp->core),
391                                                  DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY,
392                                                  &value);
393         if (err)
394                 return err;
395         if (value.vu8 == DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH)
396                 return 0;
397
398         /* Validate driver & FW are compatible */
399         if (rev->major != req_rev->major) {
400                 WARN(1, "Mismatch in major FW version [%d:%d] is never expected; Please contact support\n",
401                      rev->major, req_rev->major);
402                 return -EINVAL;
403         }
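        /* The running firmware is accepted only if it is on the same branch
         * as the required version and not older than it. As an illustrative
         * example against the Spectrum-1 requirement of 13.2000.1122: a
         * running 13.2000.2308 would be accepted, while a 13.1910.x release
         * is on a different branch and would trigger a flash below.
         */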
404         if (MLXSW_SP_FWREV_MINOR_TO_BRANCH(rev->minor) ==
405             MLXSW_SP_FWREV_MINOR_TO_BRANCH(req_rev->minor) &&
406             (rev->minor > req_rev->minor ||
407              (rev->minor == req_rev->minor &&
408               rev->subminor >= req_rev->subminor)))
409                 return 0;
410
411         dev_info(mlxsw_sp->bus_info->dev, "The firmware version %d.%d.%d is incompatible with the driver\n",
412                  rev->major, rev->minor, rev->subminor);
413         dev_info(mlxsw_sp->bus_info->dev, "Flashing firmware using file %s\n",
414                  fw_filename);
415
416         err = request_firmware_direct(&firmware, fw_filename,
417                                       mlxsw_sp->bus_info->dev);
418         if (err) {
419                 dev_err(mlxsw_sp->bus_info->dev, "Could not request firmware file %s\n",
420                         fw_filename);
421                 return err;
422         }
423
424         err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware, NULL);
425         release_firmware(firmware);
426         if (err)
427                 dev_err(mlxsw_sp->bus_info->dev, "Could not upgrade firmware\n");
428
429         /* On FW flash success, tell the caller FW reset is needed
430          * if current FW supports it.
431          */
432         if (rev->minor >= req_rev->can_reset_minor)
433                 return err ? err : -EAGAIN;
434         else
435                 return 0;
436 }
437
438 static int mlxsw_sp_flash_update(struct mlxsw_core *mlxsw_core,
439                                  const char *file_name, const char *component,
440                                  struct netlink_ext_ack *extack)
441 {
442         struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
443         const struct firmware *firmware;
444         int err;
445
446         if (component)
447                 return -EOPNOTSUPP;
448
449         err = request_firmware_direct(&firmware, file_name,
450                                       mlxsw_sp->bus_info->dev);
451         if (err)
452                 return err;
453         err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware, extack);
454         release_firmware(firmware);
455
456         return err;
457 }
458
459 int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
460                               unsigned int counter_index, u64 *packets,
461                               u64 *bytes)
462 {
463         char mgpc_pl[MLXSW_REG_MGPC_LEN];
464         int err;
465
466         mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_NOP,
467                             MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
468         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
469         if (err)
470                 return err;
471         if (packets)
472                 *packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl);
473         if (bytes)
474                 *bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl);
475         return 0;
476 }
477
478 static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp,
479                                        unsigned int counter_index)
480 {
481         char mgpc_pl[MLXSW_REG_MGPC_LEN];
482
483         mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR,
484                             MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
485         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
486 }
487
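/* Allocate a flow counter from the flow sub-pool and clear it, so that the
 * first read does not return stale packet and byte values.
 */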
488 int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
489                                 unsigned int *p_counter_index)
490 {
491         int err;
492
493         err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
494                                      p_counter_index);
495         if (err)
496                 return err;
497         err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index);
498         if (err)
499                 goto err_counter_clear;
500         return 0;
501
502 err_counter_clear:
503         mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
504                               *p_counter_index);
505         return err;
506 }
507
508 void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
509                                 unsigned int counter_index)
510 {
511          mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
512                                counter_index);
513 }
514
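/* Build the Tx header prepended to packets injected from the CPU (see
 * mlxsw_sp_port_xmit()). Such packets are marked as Ethernet control
 * traffic, use the control TClass and are directed to a specific egress
 * port (tx_info->local_port) rather than going through the forwarding
 * pipeline.
 */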
515 static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
516                                      const struct mlxsw_tx_info *tx_info)
517 {
518         char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
519
520         memset(txhdr, 0, MLXSW_TXHDR_LEN);
521
522         mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
523         mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
524         mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
525         mlxsw_tx_hdr_swid_set(txhdr, 0);
526         mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
527         mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
528         mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
529 }
530
531 enum mlxsw_reg_spms_state mlxsw_sp_stp_spms_state(u8 state)
532 {
533         switch (state) {
534         case BR_STATE_FORWARDING:
535                 return MLXSW_REG_SPMS_STATE_FORWARDING;
536         case BR_STATE_LEARNING:
537                 return MLXSW_REG_SPMS_STATE_LEARNING;
538         case BR_STATE_LISTENING: /* fall-through */
539         case BR_STATE_DISABLED: /* fall-through */
540         case BR_STATE_BLOCKING:
541                 return MLXSW_REG_SPMS_STATE_DISCARDING;
542         default:
543                 BUG();
544         }
545 }
546
547 int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
548                               u8 state)
549 {
550         enum mlxsw_reg_spms_state spms_state = mlxsw_sp_stp_spms_state(state);
551         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
552         char *spms_pl;
553         int err;
554
555         spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
556         if (!spms_pl)
557                 return -ENOMEM;
558         mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
559         mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
560
561         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
562         kfree(spms_pl);
563         return err;
564 }
565
566 static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
567 {
568         char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
569         int err;
570
571         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
572         if (err)
573                 return err;
574         mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
575         return 0;
576 }
577
578 static int mlxsw_sp_port_sample_set(struct mlxsw_sp_port *mlxsw_sp_port,
579                                     bool enable, u32 rate)
580 {
581         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
582         char mpsc_pl[MLXSW_REG_MPSC_LEN];
583
584         mlxsw_reg_mpsc_pack(mpsc_pl, mlxsw_sp_port->local_port, enable, rate);
585         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpsc), mpsc_pl);
586 }
587
588 static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
589                                           bool is_up)
590 {
591         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
592         char paos_pl[MLXSW_REG_PAOS_LEN];
593
594         mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
595                             is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
596                             MLXSW_PORT_ADMIN_STATUS_DOWN);
597         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
598 }
599
600 static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
601                                       unsigned char *addr)
602 {
603         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
604         char ppad_pl[MLXSW_REG_PPAD_LEN];
605
606         mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
607         mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
608         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
609 }
610
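/* A port's MAC address is the switch base MAC with the local port number
 * added to the last octet.
 */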
611 static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
612 {
613         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
614         unsigned char *addr = mlxsw_sp_port->dev->dev_addr;
615
616         ether_addr_copy(addr, mlxsw_sp->base_mac);
617         addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
618         return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
619 }
620
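/* The MTU programmed into the device includes the Tx header and Ethernet
 * header overhead and is checked against the maximum reported by the PMTU
 * register before being written.
 */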
621 static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
622 {
623         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
624         char pmtu_pl[MLXSW_REG_PMTU_LEN];
625         int max_mtu;
626         int err;
627
628         mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
629         mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
630         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
631         if (err)
632                 return err;
633         max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);
634
635         if (mtu > max_mtu)
636                 return -EINVAL;
637
638         mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
639         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
640 }
641
642 static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
643 {
644         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
645         char pspa_pl[MLXSW_REG_PSPA_LEN];
646
647         mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port);
648         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
649 }
650
651 int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
652 {
653         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
654         char svpe_pl[MLXSW_REG_SVPE_LEN];
655
656         mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
657         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
658 }
659
660 int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
661                                    bool learn_enable)
662 {
663         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
664         char *spvmlr_pl;
665         int err;
666
667         spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
668         if (!spvmlr_pl)
669                 return -ENOMEM;
670         mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
671                               learn_enable);
672         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
673         kfree(spvmlr_pl);
674         return err;
675 }
676
677 static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
678                                     u16 vid)
679 {
680         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
681         char spvid_pl[MLXSW_REG_SPVID_LEN];
682
683         mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid);
684         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
685 }
686
687 static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port,
688                                             bool allow)
689 {
690         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
691         char spaft_pl[MLXSW_REG_SPAFT_LEN];
692
693         mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow);
694         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl);
695 }
696
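/* Set the port's PVID. A PVID of zero means the port has no PVID, in which
 * case untagged packets are simply disallowed; for a non-zero PVID, SPVID is
 * programmed and untagged packets are allowed again.
 */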
697 int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
698 {
699         int err;
700
701         if (!vid) {
702                 err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false);
703                 if (err)
704                         return err;
705         } else {
706                 err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid);
707                 if (err)
708                         return err;
709                 err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, true);
710                 if (err)
711                         goto err_port_allow_untagged_set;
712         }
713
714         mlxsw_sp_port->pvid = vid;
715         return 0;
716
717 err_port_allow_untagged_set:
718         __mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid);
719         return err;
720 }
721
722 static int
723 mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
724 {
725         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
726         char sspr_pl[MLXSW_REG_SSPR_LEN];
727
728         mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
729         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
730 }
731
732 static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
733                                          u8 local_port, u8 *p_module,
734                                          u8 *p_width, u8 *p_lane)
735 {
736         char pmlp_pl[MLXSW_REG_PMLP_LEN];
737         int err;
738
739         mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
740         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
741         if (err)
742                 return err;
743         *p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
744         *p_width = mlxsw_reg_pmlp_width_get(pmlp_pl);
745         *p_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
746         return 0;
747 }
748
749 static int mlxsw_sp_port_module_map(struct mlxsw_sp_port *mlxsw_sp_port,
750                                     u8 module, u8 width, u8 lane)
751 {
752         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
753         char pmlp_pl[MLXSW_REG_PMLP_LEN];
754         int i;
755
756         mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
757         mlxsw_reg_pmlp_width_set(pmlp_pl, width);
758         for (i = 0; i < width; i++) {
759                 mlxsw_reg_pmlp_module_set(pmlp_pl, i, module);
760                 mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, lane + i);  /* Rx & Tx */
761         }
762
763         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
764 }
765
766 static int mlxsw_sp_port_module_unmap(struct mlxsw_sp_port *mlxsw_sp_port)
767 {
768         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
769         char pmlp_pl[MLXSW_REG_PMLP_LEN];
770
771         mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
772         mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
773         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
774 }
775
776 static int mlxsw_sp_port_open(struct net_device *dev)
777 {
778         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
779         int err;
780
781         err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
782         if (err)
783                 return err;
784         netif_start_queue(dev);
785         return 0;
786 }
787
788 static int mlxsw_sp_port_stop(struct net_device *dev)
789 {
790         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
791
792         netif_stop_queue(dev);
793         return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
794 }
795
796 static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
797                                       struct net_device *dev)
798 {
799         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
800         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
801         struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
802         const struct mlxsw_tx_info tx_info = {
803                 .local_port = mlxsw_sp_port->local_port,
804                 .is_emad = false,
805         };
806         u64 len;
807         int err;
808
809         memset(skb->cb, 0, sizeof(struct mlxsw_skb_cb));
810
811         if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
812                 return NETDEV_TX_BUSY;
813
814         if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
815                 struct sk_buff *skb_orig = skb;
816
817                 skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
818                 if (!skb) {
819                         this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
820                         dev_kfree_skb_any(skb_orig);
821                         return NETDEV_TX_OK;
822                 }
823                 dev_consume_skb_any(skb_orig);
824         }
825
826         if (eth_skb_pad(skb)) {
827                 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
828                 return NETDEV_TX_OK;
829         }
830
831         mlxsw_sp_txhdr_construct(skb, &tx_info);
832         /* TX header is consumed by HW on the way so we shouldn't count its
833          * bytes as being sent.
834          */
835         len = skb->len - MLXSW_TXHDR_LEN;
836
837         /* Due to a race we might fail here because of a full queue. In that
838          * unlikely case we simply drop the packet.
839          */
840         err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);
841
842         if (!err) {
843                 pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
844                 u64_stats_update_begin(&pcpu_stats->syncp);
845                 pcpu_stats->tx_packets++;
846                 pcpu_stats->tx_bytes += len;
847                 u64_stats_update_end(&pcpu_stats->syncp);
848         } else {
849                 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
850                 dev_kfree_skb_any(skb);
851         }
852         return NETDEV_TX_OK;
853 }
854
855 static void mlxsw_sp_set_rx_mode(struct net_device *dev)
856 {
857 }
858
859 static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
860 {
861         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
862         struct sockaddr *addr = p;
863         int err;
864
865         if (!is_valid_ether_addr(addr->sa_data))
866                 return -EADDRNOTAVAIL;
867
868         err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
869         if (err)
870                 return err;
871         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
872         return 0;
873 }
874
875 static u16 mlxsw_sp_pg_buf_threshold_get(const struct mlxsw_sp *mlxsw_sp,
876                                          int mtu)
877 {
878         return 2 * mlxsw_sp_bytes_cells(mlxsw_sp, mtu);
879 }
880
881 #define MLXSW_SP_CELL_FACTOR 2  /* 2 * cell_size / (IPG + cell_size + 1) */
882
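/* Convert a PFC delay, given in bits, into a delay allowance in cells:
 * round up to bytes, convert to cells, scale by MLXSW_SP_CELL_FACTOR and
 * add one MTU worth of cells (presumably to cover a maximum-size packet
 * already in flight when the PFC frame takes effect).
 */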
883 static u16 mlxsw_sp_pfc_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu,
884                                   u16 delay)
885 {
886         delay = mlxsw_sp_bytes_cells(mlxsw_sp, DIV_ROUND_UP(delay,
887                                                             BITS_PER_BYTE));
888         return MLXSW_SP_CELL_FACTOR * delay + mlxsw_sp_bytes_cells(mlxsw_sp,
889                                                                    mtu);
890 }
891
892 /* Maximum delay buffer needed in case of PAUSE frames, in bytes.
893  * Assumes 100m cable and maximum MTU.
894  */
895 #define MLXSW_SP_PAUSE_DELAY 58752
896
897 static u16 mlxsw_sp_pg_buf_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu,
898                                      u16 delay, bool pfc, bool pause)
899 {
900         if (pfc)
901                 return mlxsw_sp_pfc_delay_get(mlxsw_sp, mtu, delay);
902         else if (pause)
903                 return mlxsw_sp_bytes_cells(mlxsw_sp, MLXSW_SP_PAUSE_DELAY);
904         else
905                 return 0;
906 }
907
908 static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int index, u16 size, u16 thres,
909                                  bool lossy)
910 {
911         if (lossy)
912                 mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, index, size);
913         else
914                 mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, index, size,
915                                                     thres);
916 }
917
918 int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
919                                  u8 *prio_tc, bool pause_en,
920                                  struct ieee_pfc *my_pfc)
921 {
922         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
923         u8 pfc_en = !!my_pfc ? my_pfc->pfc_en : 0;
924         u16 delay = !!my_pfc ? my_pfc->delay : 0;
925         char pbmc_pl[MLXSW_REG_PBMC_LEN];
926         u32 taken_headroom_cells = 0;
927         u32 max_headroom_cells;
928         int i, j, err;
929
930         max_headroom_cells = mlxsw_sp_sb_max_headroom_cells(mlxsw_sp);
931
932         mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0);
933         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
934         if (err)
935                 return err;
936
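        /* Walk the priority groups (one per TC). A PG is configured only if
         * at least one priority is mapped to it; lossless PGs (PFC or global
         * pause) receive a threshold plus a delay allowance, lossy PGs only
         * the threshold, and the sum over all PGs must fit within the port's
         * headroom.
         */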
937         for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
938                 bool configure = false;
939                 bool pfc = false;
940                 u16 thres_cells;
941                 u16 delay_cells;
942                 u16 total_cells;
943                 bool lossy;
944
945                 for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) {
946                         if (prio_tc[j] == i) {
947                                 pfc = pfc_en & BIT(j);
948                                 configure = true;
949                                 break;
950                         }
951                 }
952
953                 if (!configure)
954                         continue;
955
956                 lossy = !(pfc || pause_en);
957                 thres_cells = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu);
958                 delay_cells = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay,
959                                                         pfc, pause_en);
960                 total_cells = thres_cells + delay_cells;
961
962                 taken_headroom_cells += total_cells;
963                 if (taken_headroom_cells > max_headroom_cells)
964                         return -ENOBUFS;
965
966                 mlxsw_sp_pg_buf_pack(pbmc_pl, i, total_cells,
967                                      thres_cells, lossy);
968         }
969
970         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
971 }
972
973 static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port,
974                                       int mtu, bool pause_en)
975 {
976         u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0};
977         bool dcb_en = !!mlxsw_sp_port->dcb.ets;
978         struct ieee_pfc *my_pfc;
979         u8 *prio_tc;
980
981         prio_tc = dcb_en ? mlxsw_sp_port->dcb.ets->prio_tc : def_prio_tc;
982         my_pfc = dcb_en ? mlxsw_sp_port->dcb.pfc : NULL;
983
984         return __mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, prio_tc,
985                                             pause_en, my_pfc);
986 }
987
988 static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
989 {
990         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
991         bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
992         int err;
993
994         err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en);
995         if (err)
996                 return err;
997         err = mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, mtu);
998         if (err)
999                 goto err_span_port_mtu_update;
1000         err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
1001         if (err)
1002                 goto err_port_mtu_set;
1003         dev->mtu = mtu;
1004         return 0;
1005
1006 err_port_mtu_set:
1007         mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, dev->mtu);
1008 err_span_port_mtu_update:
1009         mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
1010         return err;
1011 }
1012
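/* Sum the per-CPU software counters kept for traffic going through the CPU
 * port. These back the IFLA_OFFLOAD_XSTATS_CPU_HIT statistics reported by
 * mlxsw_sp_port_get_offload_stats() below.
 */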
1013 static int
1014 mlxsw_sp_port_get_sw_stats64(const struct net_device *dev,
1015                              struct rtnl_link_stats64 *stats)
1016 {
1017         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1018         struct mlxsw_sp_port_pcpu_stats *p;
1019         u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
1020         u32 tx_dropped = 0;
1021         unsigned int start;
1022         int i;
1023
1024         for_each_possible_cpu(i) {
1025                 p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
1026                 do {
1027                         start = u64_stats_fetch_begin_irq(&p->syncp);
1028                         rx_packets      = p->rx_packets;
1029                         rx_bytes        = p->rx_bytes;
1030                         tx_packets      = p->tx_packets;
1031                         tx_bytes        = p->tx_bytes;
1032                 } while (u64_stats_fetch_retry_irq(&p->syncp, start));
1033
1034                 stats->rx_packets       += rx_packets;
1035                 stats->rx_bytes         += rx_bytes;
1036                 stats->tx_packets       += tx_packets;
1037                 stats->tx_bytes         += tx_bytes;
1038                 /* tx_dropped is u32, updated without syncp protection. */
1039                 tx_dropped      += p->tx_dropped;
1040         }
1041         stats->tx_dropped       = tx_dropped;
1042         return 0;
1043 }
1044
1045 static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev, int attr_id)
1046 {
1047         switch (attr_id) {
1048         case IFLA_OFFLOAD_XSTATS_CPU_HIT:
1049                 return true;
1050         }
1051
1052         return false;
1053 }
1054
1055 static int mlxsw_sp_port_get_offload_stats(int attr_id, const struct net_device *dev,
1056                                            void *sp)
1057 {
1058         switch (attr_id) {
1059         case IFLA_OFFLOAD_XSTATS_CPU_HIT:
1060                 return mlxsw_sp_port_get_sw_stats64(dev, sp);
1061         }
1062
1063         return -EINVAL;
1064 }
1065
1066 static int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp,
1067                                        int prio, char *ppcnt_pl)
1068 {
1069         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1070         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1071
1072         mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
1073         return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
1074 }
1075
1076 static int mlxsw_sp_port_get_hw_stats(struct net_device *dev,
1077                                       struct rtnl_link_stats64 *stats)
1078 {
1079         char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
1080         int err;
1081
1082         err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT,
1083                                           0, ppcnt_pl);
1084         if (err)
1085                 goto out;
1086
1087         stats->tx_packets =
1088                 mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl);
1089         stats->rx_packets =
1090                 mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl);
1091         stats->tx_bytes =
1092                 mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl);
1093         stats->rx_bytes =
1094                 mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl);
1095         stats->multicast =
1096                 mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl);
1097
1098         stats->rx_crc_errors =
1099                 mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl);
1100         stats->rx_frame_errors =
1101                 mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl);
1102
1103         stats->rx_length_errors = (
1104                 mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) +
1105                 mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) +
1106                 mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl));
1107
1108         stats->rx_errors = (stats->rx_crc_errors +
1109                 stats->rx_frame_errors + stats->rx_length_errors);
1110
1111 out:
1112         return err;
1113 }
1114
1115 static void
1116 mlxsw_sp_port_get_hw_xstats(struct net_device *dev,
1117                             struct mlxsw_sp_port_xstats *xstats)
1118 {
1119         char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
1120         int err, i;
1121
1122         err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_EXT_CNT, 0,
1123                                           ppcnt_pl);
1124         if (!err)
1125                 xstats->ecn = mlxsw_reg_ppcnt_ecn_marked_get(ppcnt_pl);
1126
1127         for (i = 0; i < TC_MAX_QUEUE; i++) {
1128                 err = mlxsw_sp_port_get_stats_raw(dev,
1129                                                   MLXSW_REG_PPCNT_TC_CONG_TC,
1130                                                   i, ppcnt_pl);
1131                 if (!err)
1132                         xstats->wred_drop[i] =
1133                                 mlxsw_reg_ppcnt_wred_discard_get(ppcnt_pl);
1134
1135                 err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_TC_CNT,
1136                                                   i, ppcnt_pl);
1137                 if (err)
1138                         continue;
1139
1140                 xstats->backlog[i] =
1141                         mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl);
1142                 xstats->tail_drop[i] =
1143                         mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get(ppcnt_pl);
1144         }
1145
1146         for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
1147                 err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_PRIO_CNT,
1148                                                   i, ppcnt_pl);
1149                 if (err)
1150                         continue;
1151
1152                 xstats->tx_packets[i] = mlxsw_reg_ppcnt_tx_frames_get(ppcnt_pl);
1153                 xstats->tx_bytes[i] = mlxsw_reg_ppcnt_tx_octets_get(ppcnt_pl);
1154         }
1155 }
1156
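/* Periodic delayed work refreshing the cached HW statistics. The device is
 * only queried while the carrier is up, and the work always reschedules
 * itself after MLXSW_HW_STATS_UPDATE_TIME.
 */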
1157 static void update_stats_cache(struct work_struct *work)
1158 {
1159         struct mlxsw_sp_port *mlxsw_sp_port =
1160                 container_of(work, struct mlxsw_sp_port,
1161                              periodic_hw_stats.update_dw.work);
1162
1163         if (!netif_carrier_ok(mlxsw_sp_port->dev))
1164                 goto out;
1165
1166         mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev,
1167                                    &mlxsw_sp_port->periodic_hw_stats.stats);
1168         mlxsw_sp_port_get_hw_xstats(mlxsw_sp_port->dev,
1169                                     &mlxsw_sp_port->periodic_hw_stats.xstats);
1170
1171 out:
1172         mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw,
1173                                MLXSW_HW_STATS_UPDATE_TIME);
1174 }
1175
1176 /* Return the stats from a cache that is updated periodically,
1177  * as this function might get called in an atomic context.
1178  */
1179 static void
1180 mlxsw_sp_port_get_stats64(struct net_device *dev,
1181                           struct rtnl_link_stats64 *stats)
1182 {
1183         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1184
1185         memcpy(stats, &mlxsw_sp_port->periodic_hw_stats.stats, sizeof(*stats));
1186 }
1187
1188 static int __mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
1189                                     u16 vid_begin, u16 vid_end,
1190                                     bool is_member, bool untagged)
1191 {
1192         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1193         char *spvm_pl;
1194         int err;
1195
1196         spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
1197         if (!spvm_pl)
1198                 return -ENOMEM;
1199
1200         mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
1201                             vid_end, is_member, untagged);
1202         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
1203         kfree(spvm_pl);
1204         return err;
1205 }
1206
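/* Update the port's membership for a range of VIDs. The range is split into
 * chunks of at most MLXSW_REG_SPVM_REC_MAX_COUNT VIDs, one SPVM write per
 * chunk, so e.g. a request covering VIDs 1-1000 results in several register
 * writes.
 */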
1207 int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
1208                            u16 vid_end, bool is_member, bool untagged)
1209 {
1210         u16 vid, vid_e;
1211         int err;
1212
1213         for (vid = vid_begin; vid <= vid_end;
1214              vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
1215                 vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
1216                             vid_end);
1217
1218                 err = __mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
1219                                                is_member, untagged);
1220                 if (err)
1221                         return err;
1222         }
1223
1224         return 0;
1225 }
1226
1227 static void mlxsw_sp_port_vlan_flush(struct mlxsw_sp_port *mlxsw_sp_port,
1228                                      bool flush_default)
1229 {
1230         struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, *tmp;
1231
1232         list_for_each_entry_safe(mlxsw_sp_port_vlan, tmp,
1233                                  &mlxsw_sp_port->vlans_list, list) {
1234                 if (!flush_default &&
1235                     mlxsw_sp_port_vlan->vid == MLXSW_SP_DEFAULT_VID)
1236                         continue;
1237                 mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
1238         }
1239 }
1240
1241 static void
1242 mlxsw_sp_port_vlan_cleanup(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
1243 {
1244         if (mlxsw_sp_port_vlan->bridge_port)
1245                 mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
1246         else if (mlxsw_sp_port_vlan->fid)
1247                 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
1248 }
1249
1250 struct mlxsw_sp_port_vlan *
1251 mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
1252 {
1253         struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1254         bool untagged = vid == MLXSW_SP_DEFAULT_VID;
1255         int err;
1256
1257         mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
1258         if (mlxsw_sp_port_vlan)
1259                 return ERR_PTR(-EEXIST);
1260
1261         err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, untagged);
1262         if (err)
1263                 return ERR_PTR(err);
1264
1265         mlxsw_sp_port_vlan = kzalloc(sizeof(*mlxsw_sp_port_vlan), GFP_KERNEL);
1266         if (!mlxsw_sp_port_vlan) {
1267                 err = -ENOMEM;
1268                 goto err_port_vlan_alloc;
1269         }
1270
1271         mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port;
1272         mlxsw_sp_port_vlan->vid = vid;
1273         list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list);
1274
1275         return mlxsw_sp_port_vlan;
1276
1277 err_port_vlan_alloc:
1278         mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
1279         return ERR_PTR(err);
1280 }
1281
1282 void mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
1283 {
1284         struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
1285         u16 vid = mlxsw_sp_port_vlan->vid;
1286
1287         mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port_vlan);
1288         list_del(&mlxsw_sp_port_vlan->list);
1289         kfree(mlxsw_sp_port_vlan);
1290         mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
1291 }
1292
1293 static int mlxsw_sp_port_add_vid(struct net_device *dev,
1294                                  __be16 __always_unused proto, u16 vid)
1295 {
1296         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1297
1298         /* VLAN 0 is added to HW filter when device goes up, but it is
1299          * reserved in our case, so simply return.
1300          */
1301         if (!vid)
1302                 return 0;
1303
1304         return PTR_ERR_OR_ZERO(mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid));
1305 }
1306
1307 static int mlxsw_sp_port_kill_vid(struct net_device *dev,
1308                                   __be16 __always_unused proto, u16 vid)
1309 {
1310         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1311         struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1312
1313         /* VLAN 0 is removed from HW filter when device goes down, but
1314          * it is reserved in our case, so simply return.
1315          */
1316         if (!vid)
1317                 return 0;
1318
1319         mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
1320         if (!mlxsw_sp_port_vlan)
1321                 return 0;
1322         mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
1323
1324         return 0;
1325 }
1326
1327 static struct mlxsw_sp_port_mall_tc_entry *
1328 mlxsw_sp_port_mall_tc_entry_find(struct mlxsw_sp_port *port,
1329                                  unsigned long cookie)
{
1330         struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
1331
1332         list_for_each_entry(mall_tc_entry, &port->mall_tc_list, list)
1333                 if (mall_tc_entry->cookie == cookie)
1334                         return mall_tc_entry;
1335
1336         return NULL;
1337 }
1338
1339 static int
1340 mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
1341                                       struct mlxsw_sp_port_mall_mirror_tc_entry *mirror,
1342                                       const struct flow_action_entry *act,
1343                                       bool ingress)
1344 {
1345         enum mlxsw_sp_span_type span_type;
1346
1347         if (!act->dev) {
1348                 netdev_err(mlxsw_sp_port->dev, "Could not find requested device\n");
1349                 return -EINVAL;
1350         }
1351
1352         mirror->ingress = ingress;
1353         span_type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
1354         return mlxsw_sp_span_mirror_add(mlxsw_sp_port, act->dev, span_type,
1355                                         true, &mirror->span_id);
1356 }
1357
1358 static void
1359 mlxsw_sp_port_del_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
1360                                       struct mlxsw_sp_port_mall_mirror_tc_entry *mirror)
1361 {
1362         enum mlxsw_sp_span_type span_type;
1363
1364         span_type = mirror->ingress ?
1365                         MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
1366         mlxsw_sp_span_mirror_del(mlxsw_sp_port, mirror->span_id,
1367                                  span_type, true);
1368 }
1369
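/* Offload a matchall sample action. Only one psample group can be active on
 * a port at a time and the requested rate must be within what the MPSC
 * register supports.
 */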
1370 static int
1371 mlxsw_sp_port_add_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port,
1372                                       struct tc_cls_matchall_offload *cls,
1373                                       const struct flow_action_entry *act,
1374                                       bool ingress)
1375 {
1376         int err;
1377
1378         if (!mlxsw_sp_port->sample)
1379                 return -EOPNOTSUPP;
1380         if (rtnl_dereference(mlxsw_sp_port->sample->psample_group)) {
1381                 netdev_err(mlxsw_sp_port->dev, "sample already active\n");
1382                 return -EEXIST;
1383         }
1384         if (act->sample.rate > MLXSW_REG_MPSC_RATE_MAX) {
1385                 netdev_err(mlxsw_sp_port->dev, "sample rate not supported\n");
1386                 return -EOPNOTSUPP;
1387         }
1388
1389         rcu_assign_pointer(mlxsw_sp_port->sample->psample_group,
1390                            act->sample.psample_group);
1391         mlxsw_sp_port->sample->truncate = act->sample.truncate;
1392         mlxsw_sp_port->sample->trunc_size = act->sample.trunc_size;
1393         mlxsw_sp_port->sample->rate = act->sample.rate;
1394
1395         err = mlxsw_sp_port_sample_set(mlxsw_sp_port, true, act->sample.rate);
1396         if (err)
1397                 goto err_port_sample_set;
1398         return 0;
1399
1400 err_port_sample_set:
1401         RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
1402         return err;
1403 }
1404
1405 static void
1406 mlxsw_sp_port_del_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port)
1407 {
1408         if (!mlxsw_sp_port->sample)
1409                 return;
1410
1411         mlxsw_sp_port_sample_set(mlxsw_sp_port, false, 1);
1412         RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
1413 }
1414
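/* Entry point for TC_CLSMATCHALL_REPLACE. Exactly one action is supported
 * per rule; depending on the action id the entry is offloaded either as a
 * port mirror (FLOW_ACTION_MIRRED) or as a sampler (FLOW_ACTION_SAMPLE), and
 * in both cases only for protocol ETH_P_ALL. Successfully offloaded entries
 * are tracked on mall_tc_list, keyed by the TC cookie, so they can be found
 * again on destroy.
 */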
1415 static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
1416                                           struct tc_cls_matchall_offload *f,
1417                                           bool ingress)
1418 {
1419         struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
1420         __be16 protocol = f->common.protocol;
1421         struct flow_action_entry *act;
1422         int err;
1423
1424         if (!flow_offload_has_one_action(&f->rule->action)) {
1425                 netdev_err(mlxsw_sp_port->dev, "only singular actions are supported\n");
1426                 return -EOPNOTSUPP;
1427         }
1428
1429         mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
1430         if (!mall_tc_entry)
1431                 return -ENOMEM;
1432         mall_tc_entry->cookie = f->cookie;
1433
1434         act = &f->rule->action.entries[0];
1435
1436         if (act->id == FLOW_ACTION_MIRRED && protocol == htons(ETH_P_ALL)) {
1437                 struct mlxsw_sp_port_mall_mirror_tc_entry *mirror;
1438
1439                 mall_tc_entry->type = MLXSW_SP_PORT_MALL_MIRROR;
1440                 mirror = &mall_tc_entry->mirror;
1441                 err = mlxsw_sp_port_add_cls_matchall_mirror(mlxsw_sp_port,
1442                                                             mirror, act,
1443                                                             ingress);
1444         } else if (act->id == FLOW_ACTION_SAMPLE &&
1445                    protocol == htons(ETH_P_ALL)) {
1446                 mall_tc_entry->type = MLXSW_SP_PORT_MALL_SAMPLE;
1447                 err = mlxsw_sp_port_add_cls_matchall_sample(mlxsw_sp_port, f,
1448                                                             act, ingress);
1449         } else {
1450                 err = -EOPNOTSUPP;
1451         }
1452
1453         if (err)
1454                 goto err_add_action;
1455
1456         list_add_tail(&mall_tc_entry->list, &mlxsw_sp_port->mall_tc_list);
1457         return 0;
1458
1459 err_add_action:
1460         kfree(mall_tc_entry);
1461         return err;
1462 }
1463
1464 static void mlxsw_sp_port_del_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
1465                                            struct tc_cls_matchall_offload *f)
1466 {
1467         struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
1468
1469         mall_tc_entry = mlxsw_sp_port_mall_tc_entry_find(mlxsw_sp_port,
1470                                                          f->cookie);
1471         if (!mall_tc_entry) {
1472                 netdev_dbg(mlxsw_sp_port->dev, "tc entry not found on port\n");
1473                 return;
1474         }
1475         list_del(&mall_tc_entry->list);
1476
1477         switch (mall_tc_entry->type) {
1478         case MLXSW_SP_PORT_MALL_MIRROR:
1479                 mlxsw_sp_port_del_cls_matchall_mirror(mlxsw_sp_port,
1480                                                       &mall_tc_entry->mirror);
1481                 break;
1482         case MLXSW_SP_PORT_MALL_SAMPLE:
1483                 mlxsw_sp_port_del_cls_matchall_sample(mlxsw_sp_port);
1484                 break;
1485         default:
1486                 WARN_ON(1);
1487         }
1488
1489         kfree(mall_tc_entry);
1490 }
1491
1492 static int mlxsw_sp_setup_tc_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
1493                                           struct tc_cls_matchall_offload *f,
1494                                           bool ingress)
1495 {
1496         switch (f->command) {
1497         case TC_CLSMATCHALL_REPLACE:
1498                 return mlxsw_sp_port_add_cls_matchall(mlxsw_sp_port, f,
1499                                                       ingress);
1500         case TC_CLSMATCHALL_DESTROY:
1501                 mlxsw_sp_port_del_cls_matchall(mlxsw_sp_port, f);
1502                 return 0;
1503         default:
1504                 return -EOPNOTSUPP;
1505         }
1506 }
1507
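/* Dispatch flower classifier offload commands to the flower/ACL code
 * (mlxsw_sp_flower_*()), using the ACL block associated with this callback.
 */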
1508 static int
1509 mlxsw_sp_setup_tc_cls_flower(struct mlxsw_sp_acl_block *acl_block,
1510                              struct tc_cls_flower_offload *f)
1511 {
1512         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_acl_block_mlxsw_sp(acl_block);
1513
1514         switch (f->command) {
1515         case TC_CLSFLOWER_REPLACE:
1516                 return mlxsw_sp_flower_replace(mlxsw_sp, acl_block, f);
1517         case TC_CLSFLOWER_DESTROY:
1518                 mlxsw_sp_flower_destroy(mlxsw_sp, acl_block, f);
1519                 return 0;
1520         case TC_CLSFLOWER_STATS:
1521                 return mlxsw_sp_flower_stats(mlxsw_sp, acl_block, f);
1522         case TC_CLSFLOWER_TMPLT_CREATE:
1523                 return mlxsw_sp_flower_tmplt_create(mlxsw_sp, acl_block, f);
1524         case TC_CLSFLOWER_TMPLT_DESTROY:
1525                 mlxsw_sp_flower_tmplt_destroy(mlxsw_sp, acl_block, f);
1526                 return 0;
1527         default:
1528                 return -EOPNOTSUPP;
1529         }
1530 }
1531
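/* Two callbacks are registered on every TC block: a matchall callback whose
 * private data is the port, and a flower callback whose private data is the
 * shared ACL block. Each callback handles its own classifier type and simply
 * returns 0 for the other one, so a single block bind serves both.
 */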
1532 static int mlxsw_sp_setup_tc_block_cb_matchall(enum tc_setup_type type,
1533                                                void *type_data,
1534                                                void *cb_priv, bool ingress)
1535 {
1536         struct mlxsw_sp_port *mlxsw_sp_port = cb_priv;
1537
1538         switch (type) {
1539         case TC_SETUP_CLSMATCHALL:
1540                 if (!tc_cls_can_offload_and_chain0(mlxsw_sp_port->dev,
1541                                                    type_data))
1542                         return -EOPNOTSUPP;
1543
1544                 return mlxsw_sp_setup_tc_cls_matchall(mlxsw_sp_port, type_data,
1545                                                       ingress);
1546         case TC_SETUP_CLSFLOWER:
1547                 return 0;
1548         default:
1549                 return -EOPNOTSUPP;
1550         }
1551 }
1552
1553 static int mlxsw_sp_setup_tc_block_cb_matchall_ig(enum tc_setup_type type,
1554                                                   void *type_data,
1555                                                   void *cb_priv)
1556 {
1557         return mlxsw_sp_setup_tc_block_cb_matchall(type, type_data,
1558                                                    cb_priv, true);
1559 }
1560
1561 static int mlxsw_sp_setup_tc_block_cb_matchall_eg(enum tc_setup_type type,
1562                                                   void *type_data,
1563                                                   void *cb_priv)
1564 {
1565         return mlxsw_sp_setup_tc_block_cb_matchall(type, type_data,
1566                                                    cb_priv, false);
1567 }
1568
1569 static int mlxsw_sp_setup_tc_block_cb_flower(enum tc_setup_type type,
1570                                              void *type_data, void *cb_priv)
1571 {
1572         struct mlxsw_sp_acl_block *acl_block = cb_priv;
1573
1574         switch (type) {
1575         case TC_SETUP_CLSMATCHALL:
1576                 return 0;
1577         case TC_SETUP_CLSFLOWER:
1578                 if (mlxsw_sp_acl_block_disabled(acl_block))
1579                         return -EOPNOTSUPP;
1580
1581                 return mlxsw_sp_setup_tc_cls_flower(acl_block, type_data);
1582         default:
1583                 return -EOPNOTSUPP;
1584         }
1585 }
1586
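/* Bind the flower callback for a TC block. The mlxsw_sp pointer is used as
 * the callback identity, so a block shared between ports ends up with a
 * single callback and a single ACL block: the first bind creates the ACL
 * block and registers the callback, later binds only take a reference via
 * tcf_block_cb_incref() and bind the additional port to the same ACL block.
 * The error path only unregisters the callback and destroys the ACL block
 * when the last reference is dropped.
 */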
1587 static int
1588 mlxsw_sp_setup_tc_block_flower_bind(struct mlxsw_sp_port *mlxsw_sp_port,
1589                                     struct tcf_block *block, bool ingress,
1590                                     struct netlink_ext_ack *extack)
1591 {
1592         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1593         struct mlxsw_sp_acl_block *acl_block;
1594         struct tcf_block_cb *block_cb;
1595         int err;
1596
1597         block_cb = tcf_block_cb_lookup(block, mlxsw_sp_setup_tc_block_cb_flower,
1598                                        mlxsw_sp);
1599         if (!block_cb) {
1600                 acl_block = mlxsw_sp_acl_block_create(mlxsw_sp, block->net);
1601                 if (!acl_block)
1602                         return -ENOMEM;
1603                 block_cb = __tcf_block_cb_register(block,
1604                                                    mlxsw_sp_setup_tc_block_cb_flower,
1605                                                    mlxsw_sp, acl_block, extack);
1606                 if (IS_ERR(block_cb)) {
1607                         err = PTR_ERR(block_cb);
1608                         goto err_cb_register;
1609                 }
1610         } else {
1611                 acl_block = tcf_block_cb_priv(block_cb);
1612         }
1613         tcf_block_cb_incref(block_cb);
1614         err = mlxsw_sp_acl_block_bind(mlxsw_sp, acl_block,
1615                                       mlxsw_sp_port, ingress);
1616         if (err)
1617                 goto err_block_bind;
1618
1619         if (ingress)
1620                 mlxsw_sp_port->ing_acl_block = acl_block;
1621         else
1622                 mlxsw_sp_port->eg_acl_block = acl_block;
1623
1624         return 0;
1625
1626 err_block_bind:
1627         if (!tcf_block_cb_decref(block_cb)) {
1628                 __tcf_block_cb_unregister(block, block_cb);
1629 err_cb_register:
1630                 mlxsw_sp_acl_block_destroy(acl_block);
1631         }
1632         return err;
1633 }
1634
1635 static void
1636 mlxsw_sp_setup_tc_block_flower_unbind(struct mlxsw_sp_port *mlxsw_sp_port,
1637                                       struct tcf_block *block, bool ingress)
1638 {
1639         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1640         struct mlxsw_sp_acl_block *acl_block;
1641         struct tcf_block_cb *block_cb;
1642         int err;
1643
1644         block_cb = tcf_block_cb_lookup(block, mlxsw_sp_setup_tc_block_cb_flower,
1645                                        mlxsw_sp);
1646         if (!block_cb)
1647                 return;
1648
1649         if (ingress)
1650                 mlxsw_sp_port->ing_acl_block = NULL;
1651         else
1652                 mlxsw_sp_port->eg_acl_block = NULL;
1653
1654         acl_block = tcf_block_cb_priv(block_cb);
1655         err = mlxsw_sp_acl_block_unbind(mlxsw_sp, acl_block,
1656                                         mlxsw_sp_port, ingress);
1657         if (!err && !tcf_block_cb_decref(block_cb)) {
1658                 __tcf_block_cb_unregister(block, block_cb);
1659                 mlxsw_sp_acl_block_destroy(acl_block);
1660         }
1661 }
1662
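/* TC block offload entry point. The binder type selects the ingress or
 * egress flavour of the matchall callback; on bind, that callback is
 * registered with the port as its private data and the flower callback is
 * bound through mlxsw_sp_setup_tc_block_flower_bind(). Unbind reverses both
 * steps.
 */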
1663 static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
1664                                    struct tc_block_offload *f)
1665 {
1666         tc_setup_cb_t *cb;
1667         bool ingress;
1668         int err;
1669
1670         if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) {
1671                 cb = mlxsw_sp_setup_tc_block_cb_matchall_ig;
1672                 ingress = true;
1673         } else if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS) {
1674                 cb = mlxsw_sp_setup_tc_block_cb_matchall_eg;
1675                 ingress = false;
1676         } else {
1677                 return -EOPNOTSUPP;
1678         }
1679
1680         switch (f->command) {
1681         case TC_BLOCK_BIND:
1682                 err = tcf_block_cb_register(f->block, cb, mlxsw_sp_port,
1683                                             mlxsw_sp_port, f->extack);
1684                 if (err)
1685                         return err;
1686                 err = mlxsw_sp_setup_tc_block_flower_bind(mlxsw_sp_port,
1687                                                           f->block, ingress,
1688                                                           f->extack);
1689                 if (err) {
1690                         tcf_block_cb_unregister(f->block, cb, mlxsw_sp_port);
1691                         return err;
1692                 }
1693                 return 0;
1694         case TC_BLOCK_UNBIND:
1695                 mlxsw_sp_setup_tc_block_flower_unbind(mlxsw_sp_port,
1696                                                       f->block, ingress);
1697                 tcf_block_cb_unregister(f->block, cb, mlxsw_sp_port);
1698                 return 0;
1699         default:
1700                 return -EOPNOTSUPP;
1701         }
1702 }
1703
1704 static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type,
1705                              void *type_data)
1706 {
1707         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1708
1709         switch (type) {
1710         case TC_SETUP_BLOCK:
1711                 return mlxsw_sp_setup_tc_block(mlxsw_sp_port, type_data);
1712         case TC_SETUP_QDISC_RED:
1713                 return mlxsw_sp_setup_tc_red(mlxsw_sp_port, type_data);
1714         case TC_SETUP_QDISC_PRIO:
1715                 return mlxsw_sp_setup_tc_prio(mlxsw_sp_port, type_data);
1716         default:
1717                 return -EOPNOTSUPP;
1718         }
1719 }
1720
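/* Handler for toggling the hw-tc-offload (NETIF_F_HW_TC) feature. Disabling
 * it is refused while any rules are installed on the port's ACL blocks or
 * while matchall entries exist; otherwise the bound ACL blocks are marked
 * disabled (or re-enabled) via their disable counters.
 *
 * Illustrative command: ethtool -K sw1p1 hw-tc-offload off
 */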
1722 static int mlxsw_sp_feature_hw_tc(struct net_device *dev, bool enable)
1723 {
1724         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1725
1726         if (!enable) {
1727                 if (mlxsw_sp_acl_block_rule_count(mlxsw_sp_port->ing_acl_block) ||
1728                     mlxsw_sp_acl_block_rule_count(mlxsw_sp_port->eg_acl_block) ||
1729                     !list_empty(&mlxsw_sp_port->mall_tc_list)) {
1730                         netdev_err(dev, "Active offloaded tc filters, can't turn hw_tc_offload off\n");
1731                         return -EINVAL;
1732                 }
1733                 mlxsw_sp_acl_block_disable_inc(mlxsw_sp_port->ing_acl_block);
1734                 mlxsw_sp_acl_block_disable_inc(mlxsw_sp_port->eg_acl_block);
1735         } else {
1736                 mlxsw_sp_acl_block_disable_dec(mlxsw_sp_port->ing_acl_block);
1737                 mlxsw_sp_acl_block_disable_dec(mlxsw_sp_port->eg_acl_block);
1738         }
1739         return 0;
1740 }
1741
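/* Handler for the loopback feature: the port is taken administratively down
 * around the PPLR register write that toggles physical loopback, and brought
 * back up afterwards if it was running.
 *
 * Illustrative command: ethtool -K sw1p1 loopback on
 */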
1742 static int mlxsw_sp_feature_loopback(struct net_device *dev, bool enable)
1743 {
1744         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1745         char pplr_pl[MLXSW_REG_PPLR_LEN];
1746         int err;
1747
1748         if (netif_running(dev))
1749                 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
1750
1751         mlxsw_reg_pplr_pack(pplr_pl, mlxsw_sp_port->local_port, enable);
1752         err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pplr),
1753                               pplr_pl);
1754
1755         if (netif_running(dev))
1756                 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
1757
1758         return err;
1759 }
1760
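/* Generic helper for ndo_set_features: if the given feature bit actually
 * changes, call the matching handler and, on success, mirror the new state
 * into dev->features. Errors are logged with the feature name and left for
 * the caller to translate.
 */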
1761 typedef int (*mlxsw_sp_feature_handler)(struct net_device *dev, bool enable);
1762
1763 static int mlxsw_sp_handle_feature(struct net_device *dev,
1764                                    netdev_features_t wanted_features,
1765                                    netdev_features_t feature,
1766                                    mlxsw_sp_feature_handler feature_handler)
1767 {
1768         netdev_features_t changes = wanted_features ^ dev->features;
1769         bool enable = !!(wanted_features & feature);
1770         int err;
1771
1772         if (!(changes & feature))
1773                 return 0;
1774
1775         err = feature_handler(dev, enable);
1776         if (err) {
1777                 netdev_err(dev, "%s feature %pNF failed, err %d\n",
1778                            enable ? "Enable" : "Disable", &feature, err);
1779                 return err;
1780         }
1781
1782         if (enable)
1783                 dev->features |= feature;
1784         else
1785                 dev->features &= ~feature;
1786
1787         return 0;
1788 }

1789 static int mlxsw_sp_set_features(struct net_device *dev,
1790                                  netdev_features_t features)
1791 {
1792         netdev_features_t oper_features = dev->features;
1793         int err = 0;
1794
1795         err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_HW_TC,
1796                                        mlxsw_sp_feature_hw_tc);
1797         err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_LOOPBACK,
1798                                        mlxsw_sp_feature_loopback);
1799
1800         if (err) {
1801                 dev->features = oper_features;
1802                 return -EINVAL;
1803         }
1804
1805         return 0;
1806 }
1807
1808 static struct devlink_port *
1809 mlxsw_sp_port_get_devlink_port(struct net_device *dev)
1810 {
1811         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1812         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1813
1814         return mlxsw_core_port_devlink_port_get(mlxsw_sp->core,
1815                                                 mlxsw_sp_port->local_port);
1816 }
1817
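/* SIOCSHWTSTAMP handler: copy the hwtstamp_config from user space, let the
 * per-ASIC ptp_ops validate and apply it, and copy the (possibly adjusted)
 * config back so user space sees what was actually programmed.
 */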
1818 static int mlxsw_sp_port_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
1819                                       struct ifreq *ifr)
1820 {
1821         struct hwtstamp_config config;
1822         int err;
1823
1824         if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
1825                 return -EFAULT;
1826
1827         err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port,
1828                                                              &config);
1829         if (err)
1830                 return err;
1831
1832         if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
1833                 return -EFAULT;
1834
1835         return 0;
1836 }
1837
1838 static int mlxsw_sp_port_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
1839                                       struct ifreq *ifr)
1840 {
1841         struct hwtstamp_config config;
1842         int err;
1843
1844         err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_get(mlxsw_sp_port,
1845                                                              &config);
1846         if (err)
1847                 return err;
1848
1849         if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
1850                 return -EFAULT;
1851
1852         return 0;
1853 }
1854
1855 static inline void mlxsw_sp_port_ptp_clear(struct mlxsw_sp_port *mlxsw_sp_port)
1856 {
1857         struct hwtstamp_config config = {0};
1858
1859         mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port, &config);
1860 }
1861
1862 static int
1863 mlxsw_sp_port_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1864 {
1865         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1866
1867         switch (cmd) {
1868         case SIOCSHWTSTAMP:
1869                 return mlxsw_sp_port_hwtstamp_set(mlxsw_sp_port, ifr);
1870         case SIOCGHWTSTAMP:
1871                 return mlxsw_sp_port_hwtstamp_get(mlxsw_sp_port, ifr);
1872         default:
1873                 return -EOPNOTSUPP;
1874         }
1875 }
1876
1877 static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
1878         .ndo_open               = mlxsw_sp_port_open,
1879         .ndo_stop               = mlxsw_sp_port_stop,
1880         .ndo_start_xmit         = mlxsw_sp_port_xmit,
1881         .ndo_setup_tc           = mlxsw_sp_setup_tc,
1882         .ndo_set_rx_mode        = mlxsw_sp_set_rx_mode,
1883         .ndo_set_mac_address    = mlxsw_sp_port_set_mac_address,
1884         .ndo_change_mtu         = mlxsw_sp_port_change_mtu,
1885         .ndo_get_stats64        = mlxsw_sp_port_get_stats64,
1886         .ndo_has_offload_stats  = mlxsw_sp_port_has_offload_stats,
1887         .ndo_get_offload_stats  = mlxsw_sp_port_get_offload_stats,
1888         .ndo_vlan_rx_add_vid    = mlxsw_sp_port_add_vid,
1889         .ndo_vlan_rx_kill_vid   = mlxsw_sp_port_kill_vid,
1890         .ndo_set_features       = mlxsw_sp_set_features,
1891         .ndo_get_devlink_port   = mlxsw_sp_port_get_devlink_port,
1892         .ndo_do_ioctl           = mlxsw_sp_port_ioctl,
1893 };
1894
1895 static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
1896                                       struct ethtool_drvinfo *drvinfo)
1897 {
1898         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1899         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1900
1901         strlcpy(drvinfo->driver, mlxsw_sp->bus_info->device_kind,
1902                 sizeof(drvinfo->driver));
1903         strlcpy(drvinfo->version, mlxsw_sp_driver_version,
1904                 sizeof(drvinfo->version));
1905         snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
1906                  "%d.%d.%d",
1907                  mlxsw_sp->bus_info->fw_rev.major,
1908                  mlxsw_sp->bus_info->fw_rev.minor,
1909                  mlxsw_sp->bus_info->fw_rev.subminor);
1910         strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name,
1911                 sizeof(drvinfo->bus_info));
1912 }
1913
1914 static void mlxsw_sp_port_get_pauseparam(struct net_device *dev,
1915                                          struct ethtool_pauseparam *pause)
1916 {
1917         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1918
1919         pause->rx_pause = mlxsw_sp_port->link.rx_pause;
1920         pause->tx_pause = mlxsw_sp_port->link.tx_pause;
1921 }
1922
1923 static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port *mlxsw_sp_port,
1924                                    struct ethtool_pauseparam *pause)
1925 {
1926         char pfcc_pl[MLXSW_REG_PFCC_LEN];
1927
1928         mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port);
1929         mlxsw_reg_pfcc_pprx_set(pfcc_pl, pause->rx_pause);
1930         mlxsw_reg_pfcc_pptx_set(pfcc_pl, pause->tx_pause);
1931
1932         return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc),
1933                                pfcc_pl);
1934 }
1935
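/* ethtool -A handler. PAUSE and PFC are mutually exclusive on the port, and
 * PAUSE autonegotiation is not supported. The port headroom is resized
 * according to the requested PAUSE setting before the PFCC register is
 * written; if that write fails, the headroom is restored based on the
 * previously active setting.
 */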
1936 static int mlxsw_sp_port_set_pauseparam(struct net_device *dev,
1937                                         struct ethtool_pauseparam *pause)
1938 {
1939         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1940         bool pause_en = pause->tx_pause || pause->rx_pause;
1941         int err;
1942
1943         if (mlxsw_sp_port->dcb.pfc && mlxsw_sp_port->dcb.pfc->pfc_en) {
1944                 netdev_err(dev, "PFC already enabled on port\n");
1945                 return -EINVAL;
1946         }
1947
1948         if (pause->autoneg) {
1949                 netdev_err(dev, "PAUSE frames autonegotiation isn't supported\n");
1950                 return -EINVAL;
1951         }
1952
1953         err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
1954         if (err) {
1955                 netdev_err(dev, "Failed to configure port's headroom\n");
1956                 return err;
1957         }
1958
1959         err = mlxsw_sp_port_pause_set(mlxsw_sp_port, pause);
1960         if (err) {
1961                 netdev_err(dev, "Failed to set PAUSE parameters\n");
1962                 goto err_port_pause_configure;
1963         }
1964
1965         mlxsw_sp_port->link.rx_pause = pause->rx_pause;
1966         mlxsw_sp_port->link.tx_pause = pause->tx_pause;
1967
1968         return 0;
1969
1970 err_port_pause_configure:
1971         pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
1972         mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
1973         return err;
1974 }
1975
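/* Each ethtool statistic is described by its string and a getter that
 * extracts the counter from a PPCNT register payload. Counters flagged
 * cells_bytes are reported by the hardware in buffer cells and are converted
 * to bytes before being handed to ethtool.
 */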
1976 struct mlxsw_sp_port_hw_stats {
1977         char str[ETH_GSTRING_LEN];
1978         u64 (*getter)(const char *payload);
1979         bool cells_bytes;
1980 };
1981
1982 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
1983         {
1984                 .str = "a_frames_transmitted_ok",
1985                 .getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
1986         },
1987         {
1988                 .str = "a_frames_received_ok",
1989                 .getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
1990         },
1991         {
1992                 .str = "a_frame_check_sequence_errors",
1993                 .getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
1994         },
1995         {
1996                 .str = "a_alignment_errors",
1997                 .getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
1998         },
1999         {
2000                 .str = "a_octets_transmitted_ok",
2001                 .getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
2002         },
2003         {
2004                 .str = "a_octets_received_ok",
2005                 .getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
2006         },
2007         {
2008                 .str = "a_multicast_frames_xmitted_ok",
2009                 .getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
2010         },
2011         {
2012                 .str = "a_broadcast_frames_xmitted_ok",
2013                 .getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
2014         },
2015         {
2016                 .str = "a_multicast_frames_received_ok",
2017                 .getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
2018         },
2019         {
2020                 .str = "a_broadcast_frames_received_ok",
2021                 .getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
2022         },
2023         {
2024                 .str = "a_in_range_length_errors",
2025                 .getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
2026         },
2027         {
2028                 .str = "a_out_of_range_length_field",
2029                 .getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
2030         },
2031         {
2032                 .str = "a_frame_too_long_errors",
2033                 .getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
2034         },
2035         {
2036                 .str = "a_symbol_error_during_carrier",
2037                 .getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
2038         },
2039         {
2040                 .str = "a_mac_control_frames_transmitted",
2041                 .getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
2042         },
2043         {
2044                 .str = "a_mac_control_frames_received",
2045                 .getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
2046         },
2047         {
2048                 .str = "a_unsupported_opcodes_received",
2049                 .getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
2050         },
2051         {
2052                 .str = "a_pause_mac_ctrl_frames_received",
2053                 .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
2054         },
2055         {
2056                 .str = "a_pause_mac_ctrl_frames_xmitted",
2057                 .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
2058         },
2059 };
2060
2061 #define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)
2062
2063 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_2863_stats[] = {
2064         {
2065                 .str = "if_in_discards",
2066                 .getter = mlxsw_reg_ppcnt_if_in_discards_get,
2067         },
2068         {
2069                 .str = "if_out_discards",
2070                 .getter = mlxsw_reg_ppcnt_if_out_discards_get,
2071         },
2072         {
2073                 .str = "if_out_errors",
2074                 .getter = mlxsw_reg_ppcnt_if_out_errors_get,
2075         },
2076 };
2077
2078 #define MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN \
2079         ARRAY_SIZE(mlxsw_sp_port_hw_rfc_2863_stats)
2080
2081 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_2819_stats[] = {
2082         {
2083                 .str = "ether_stats_undersize_pkts",
2084                 .getter = mlxsw_reg_ppcnt_ether_stats_undersize_pkts_get,
2085         },
2086         {
2087                 .str = "ether_stats_oversize_pkts",
2088                 .getter = mlxsw_reg_ppcnt_ether_stats_oversize_pkts_get,
2089         },
2090         {
2091                 .str = "ether_stats_fragments",
2092                 .getter = mlxsw_reg_ppcnt_ether_stats_fragments_get,
2093         },
2094         {
2095                 .str = "ether_pkts64octets",
2096                 .getter = mlxsw_reg_ppcnt_ether_stats_pkts64octets_get,
2097         },
2098         {
2099                 .str = "ether_pkts65to127octets",
2100                 .getter = mlxsw_reg_ppcnt_ether_stats_pkts65to127octets_get,
2101         },
2102         {
2103                 .str = "ether_pkts128to255octets",
2104                 .getter = mlxsw_reg_ppcnt_ether_stats_pkts128to255octets_get,
2105         },
2106         {
2107                 .str = "ether_pkts256to511octets",
2108                 .getter = mlxsw_reg_ppcnt_ether_stats_pkts256to511octets_get,
2109         },
2110         {
2111                 .str = "ether_pkts512to1023octets",
2112                 .getter = mlxsw_reg_ppcnt_ether_stats_pkts512to1023octets_get,
2113         },
2114         {
2115                 .str = "ether_pkts1024to1518octets",
2116                 .getter = mlxsw_reg_ppcnt_ether_stats_pkts1024to1518octets_get,
2117         },
2118         {
2119                 .str = "ether_pkts1519to2047octets",
2120                 .getter = mlxsw_reg_ppcnt_ether_stats_pkts1519to2047octets_get,
2121         },
2122         {
2123                 .str = "ether_pkts2048to4095octets",
2124                 .getter = mlxsw_reg_ppcnt_ether_stats_pkts2048to4095octets_get,
2125         },
2126         {
2127                 .str = "ether_pkts4096to8191octets",
2128                 .getter = mlxsw_reg_ppcnt_ether_stats_pkts4096to8191octets_get,
2129         },
2130         {
2131                 .str = "ether_pkts8192to10239octets",
2132                 .getter = mlxsw_reg_ppcnt_ether_stats_pkts8192to10239octets_get,
2133         },
2134 };
2135
2136 #define MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN \
2137         ARRAY_SIZE(mlxsw_sp_port_hw_rfc_2819_stats)
2138
2139 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_3635_stats[] = {
2140         {
2141                 .str = "dot3stats_fcs_errors",
2142                 .getter = mlxsw_reg_ppcnt_dot3stats_fcs_errors_get,
2143         },
2144         {
2145                 .str = "dot3stats_symbol_errors",
2146                 .getter = mlxsw_reg_ppcnt_dot3stats_symbol_errors_get,
2147         },
2148         {
2149                 .str = "dot3control_in_unknown_opcodes",
2150                 .getter = mlxsw_reg_ppcnt_dot3control_in_unknown_opcodes_get,
2151         },
2152         {
2153                 .str = "dot3in_pause_frames",
2154                 .getter = mlxsw_reg_ppcnt_dot3in_pause_frames_get,
2155         },
2156 };
2157
2158 #define MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN \
2159         ARRAY_SIZE(mlxsw_sp_port_hw_rfc_3635_stats)
2160
2161 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_discard_stats[] = {
2162         {
2163                 .str = "discard_ingress_general",
2164                 .getter = mlxsw_reg_ppcnt_ingress_general_get,
2165         },
2166         {
2167                 .str = "discard_ingress_policy_engine",
2168                 .getter = mlxsw_reg_ppcnt_ingress_policy_engine_get,
2169         },
2170         {
2171                 .str = "discard_ingress_vlan_membership",
2172                 .getter = mlxsw_reg_ppcnt_ingress_vlan_membership_get,
2173         },
2174         {
2175                 .str = "discard_ingress_tag_frame_type",
2176                 .getter = mlxsw_reg_ppcnt_ingress_tag_frame_type_get,
2177         },
2178         {
2179                 .str = "discard_egress_vlan_membership",
2180                 .getter = mlxsw_reg_ppcnt_egress_vlan_membership_get,
2181         },
2182         {
2183                 .str = "discard_loopback_filter",
2184                 .getter = mlxsw_reg_ppcnt_loopback_filter_get,
2185         },
2186         {
2187                 .str = "discard_egress_general",
2188                 .getter = mlxsw_reg_ppcnt_egress_general_get,
2189         },
2190         {
2191                 .str = "discard_egress_hoq",
2192                 .getter = mlxsw_reg_ppcnt_egress_hoq_get,
2193         },
2194         {
2195                 .str = "discard_egress_policy_engine",
2196                 .getter = mlxsw_reg_ppcnt_egress_policy_engine_get,
2197         },
2198         {
2199                 .str = "discard_ingress_tx_link_down",
2200                 .getter = mlxsw_reg_ppcnt_ingress_tx_link_down_get,
2201         },
2202         {
2203                 .str = "discard_egress_stp_filter",
2204                 .getter = mlxsw_reg_ppcnt_egress_stp_filter_get,
2205         },
2206         {
2207                 .str = "discard_egress_sll",
2208                 .getter = mlxsw_reg_ppcnt_egress_sll_get,
2209         },
2210 };
2211
2212 #define MLXSW_SP_PORT_HW_DISCARD_STATS_LEN \
2213         ARRAY_SIZE(mlxsw_sp_port_hw_discard_stats)
2214
2215 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats[] = {
2216         {
2217                 .str = "rx_octets_prio",
2218                 .getter = mlxsw_reg_ppcnt_rx_octets_get,
2219         },
2220         {
2221                 .str = "rx_frames_prio",
2222                 .getter = mlxsw_reg_ppcnt_rx_frames_get,
2223         },
2224         {
2225                 .str = "tx_octets_prio",
2226                 .getter = mlxsw_reg_ppcnt_tx_octets_get,
2227         },
2228         {
2229                 .str = "tx_frames_prio",
2230                 .getter = mlxsw_reg_ppcnt_tx_frames_get,
2231         },
2232         {
2233                 .str = "rx_pause_prio",
2234                 .getter = mlxsw_reg_ppcnt_rx_pause_get,
2235         },
2236         {
2237                 .str = "rx_pause_duration_prio",
2238                 .getter = mlxsw_reg_ppcnt_rx_pause_duration_get,
2239         },
2240         {
2241                 .str = "tx_pause_prio",
2242                 .getter = mlxsw_reg_ppcnt_tx_pause_get,
2243         },
2244         {
2245                 .str = "tx_pause_duration_prio",
2246                 .getter = mlxsw_reg_ppcnt_tx_pause_duration_get,
2247         },
2248 };
2249
2250 #define MLXSW_SP_PORT_HW_PRIO_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_prio_stats)
2251
2252 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats[] = {
2253         {
2254                 .str = "tc_transmit_queue_tc",
2255                 .getter = mlxsw_reg_ppcnt_tc_transmit_queue_get,
2256                 .cells_bytes = true,
2257         },
2258         {
2259                 .str = "tc_no_buffer_discard_uc_tc",
2260                 .getter = mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get,
2261         },
2262 };
2263
2264 #define MLXSW_SP_PORT_HW_TC_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_tc_stats)
2265
2266 #define MLXSW_SP_PORT_ETHTOOL_STATS_LEN (MLXSW_SP_PORT_HW_STATS_LEN + \
2267                                          MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN + \
2268                                          MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN + \
2269                                          MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN + \
2270                                          MLXSW_SP_PORT_HW_DISCARD_STATS_LEN + \
2271                                          (MLXSW_SP_PORT_HW_PRIO_STATS_LEN * \
2272                                           IEEE_8021QAZ_MAX_TCS) + \
2273                                          (MLXSW_SP_PORT_HW_TC_STATS_LEN * \
2274                                           TC_MAX_QUEUE))
2275
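/* The per-priority and per-TC stat names are generated by suffixing the base
 * string with the priority or traffic class number, e.g. "rx_octets_prio_3".
 * MLXSW_SP_PORT_ETHTOOL_STATS_LEN above accounts for all counter groups and
 * must stay in sync with the order used in mlxsw_sp_port_get_strings() and
 * mlxsw_sp_port_get_stats().
 */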
2276 static void mlxsw_sp_port_get_prio_strings(u8 **p, int prio)
2277 {
2278         int i;
2279
2280         for (i = 0; i < MLXSW_SP_PORT_HW_PRIO_STATS_LEN; i++) {
2281                 snprintf(*p, ETH_GSTRING_LEN, "%.29s_%.1d",
2282                          mlxsw_sp_port_hw_prio_stats[i].str, prio);
2283                 *p += ETH_GSTRING_LEN;
2284         }
2285 }
2286
2287 static void mlxsw_sp_port_get_tc_strings(u8 **p, int tc)
2288 {
2289         int i;
2290
2291         for (i = 0; i < MLXSW_SP_PORT_HW_TC_STATS_LEN; i++) {
2292                 snprintf(*p, ETH_GSTRING_LEN, "%.29s_%.1d",
2293                          mlxsw_sp_port_hw_tc_stats[i].str, tc);
2294                 *p += ETH_GSTRING_LEN;
2295         }
2296 }
2297
2298 static void mlxsw_sp_port_get_strings(struct net_device *dev,
2299                                       u32 stringset, u8 *data)
2300 {
2301         u8 *p = data;
2302         int i;
2303
2304         switch (stringset) {
2305         case ETH_SS_STATS:
2306                 for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) {
2307                         memcpy(p, mlxsw_sp_port_hw_stats[i].str,
2308                                ETH_GSTRING_LEN);
2309                         p += ETH_GSTRING_LEN;
2310                 }
2311
2312                 for (i = 0; i < MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN; i++) {
2313                         memcpy(p, mlxsw_sp_port_hw_rfc_2863_stats[i].str,
2314                                ETH_GSTRING_LEN);
2315                         p += ETH_GSTRING_LEN;
2316                 }
2317
2318                 for (i = 0; i < MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN; i++) {
2319                         memcpy(p, mlxsw_sp_port_hw_rfc_2819_stats[i].str,
2320                                ETH_GSTRING_LEN);
2321                         p += ETH_GSTRING_LEN;
2322                 }
2323
2324                 for (i = 0; i < MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN; i++) {
2325                         memcpy(p, mlxsw_sp_port_hw_rfc_3635_stats[i].str,
2326                                ETH_GSTRING_LEN);
2327                         p += ETH_GSTRING_LEN;
2328                 }
2329
2330                 for (i = 0; i < MLXSW_SP_PORT_HW_DISCARD_STATS_LEN; i++) {
2331                         memcpy(p, mlxsw_sp_port_hw_discard_stats[i].str,
2332                                ETH_GSTRING_LEN);
2333                         p += ETH_GSTRING_LEN;
2334                 }
2335
2336                 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
2337                         mlxsw_sp_port_get_prio_strings(&p, i);
2338
2339                 for (i = 0; i < TC_MAX_QUEUE; i++)
2340                         mlxsw_sp_port_get_tc_strings(&p, i);
2341
2342                 break;
2343         }
2344 }
2345
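/* ethtool -p handler: drive the port LED through the MLCR register. Only
 * ETHTOOL_ID_ACTIVE and ETHTOOL_ID_INACTIVE are handled; other states are
 * rejected with -EOPNOTSUPP.
 */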
2346 static int mlxsw_sp_port_set_phys_id(struct net_device *dev,
2347                                      enum ethtool_phys_id_state state)
2348 {
2349         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
2350         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2351         char mlcr_pl[MLXSW_REG_MLCR_LEN];
2352         bool active;
2353
2354         switch (state) {
2355         case ETHTOOL_ID_ACTIVE:
2356                 active = true;
2357                 break;
2358         case ETHTOOL_ID_INACTIVE:
2359                 active = false;
2360                 break;
2361         default:
2362                 return -EOPNOTSUPP;
2363         }
2364
2365         mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active);
2366         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl);
2367 }
2368
2369 static int
2370 mlxsw_sp_get_hw_stats_by_group(struct mlxsw_sp_port_hw_stats **p_hw_stats,
2371                                int *p_len, enum mlxsw_reg_ppcnt_grp grp)
2372 {
2373         switch (grp) {
2374         case MLXSW_REG_PPCNT_IEEE_8023_CNT:
2375                 *p_hw_stats = mlxsw_sp_port_hw_stats;
2376                 *p_len = MLXSW_SP_PORT_HW_STATS_LEN;
2377                 break;
2378         case MLXSW_REG_PPCNT_RFC_2863_CNT:
2379                 *p_hw_stats = mlxsw_sp_port_hw_rfc_2863_stats;
2380                 *p_len = MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN;
2381                 break;
2382         case MLXSW_REG_PPCNT_RFC_2819_CNT:
2383                 *p_hw_stats = mlxsw_sp_port_hw_rfc_2819_stats;
2384                 *p_len = MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN;
2385                 break;
2386         case MLXSW_REG_PPCNT_RFC_3635_CNT:
2387                 *p_hw_stats = mlxsw_sp_port_hw_rfc_3635_stats;
2388                 *p_len = MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN;
2389                 break;
2390         case MLXSW_REG_PPCNT_DISCARD_CNT:
2391                 *p_hw_stats = mlxsw_sp_port_hw_discard_stats;
2392                 *p_len = MLXSW_SP_PORT_HW_DISCARD_STATS_LEN;
2393                 break;
2394         case MLXSW_REG_PPCNT_PRIO_CNT:
2395                 *p_hw_stats = mlxsw_sp_port_hw_prio_stats;
2396                 *p_len = MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
2397                 break;
2398         case MLXSW_REG_PPCNT_TC_CNT:
2399                 *p_hw_stats = mlxsw_sp_port_hw_tc_stats;
2400                 *p_len = MLXSW_SP_PORT_HW_TC_STATS_LEN;
2401                 break;
2402         default:
2403                 WARN_ON(1);
2404                 return -EOPNOTSUPP;
2405         }
2406         return 0;
2407 }
2408
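/* Fetch one PPCNT counter group (for the given priority/TC where relevant)
 * into a local register payload and run every getter of that group over it,
 * writing the results into the ethtool data array starting at data_index.
 * Counters marked cells_bytes are converted from buffer cells to bytes.
 */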
2409 static void __mlxsw_sp_port_get_stats(struct net_device *dev,
2410                                       enum mlxsw_reg_ppcnt_grp grp, int prio,
2411                                       u64 *data, int data_index)
2412 {
2413         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
2414         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2415         struct mlxsw_sp_port_hw_stats *hw_stats;
2416         char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
2417         int i, len;
2418         int err;
2419
2420         err = mlxsw_sp_get_hw_stats_by_group(&hw_stats, &len, grp);
2421         if (err)
2422                 return;
2423         mlxsw_sp_port_get_stats_raw(dev, grp, prio, ppcnt_pl);
2424         for (i = 0; i < len; i++) {
2425                 data[data_index + i] = hw_stats[i].getter(ppcnt_pl);
2426                 if (!hw_stats[i].cells_bytes)
2427                         continue;
2428                 data[data_index + i] = mlxsw_sp_cells_bytes(mlxsw_sp,
2429                                                             data[data_index + i]);
2430         }
2431 }
2432
2433 static void mlxsw_sp_port_get_stats(struct net_device *dev,
2434                                     struct ethtool_stats *stats, u64 *data)
2435 {
2436         int i, data_index = 0;
2437
2438         /* IEEE 802.3 Counters */
2439         __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT, 0,
2440                                   data, data_index);
2441         data_index = MLXSW_SP_PORT_HW_STATS_LEN;
2442
2443         /* RFC 2863 Counters */
2444         __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_2863_CNT, 0,
2445                                   data, data_index);
2446         data_index += MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN;
2447
2448         /* RFC 2819 Counters */
2449         __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_2819_CNT, 0,
2450                                   data, data_index);
2451         data_index += MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN;
2452
2453         /* RFC 3635 Counters */
2454         __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_3635_CNT, 0,
2455                                   data, data_index);
2456         data_index += MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN;
2457
2458         /* Discard Counters */
2459         __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_DISCARD_CNT, 0,
2460                                   data, data_index);
2461         data_index += MLXSW_SP_PORT_HW_DISCARD_STATS_LEN;
2462
2463         /* Per-Priority Counters */
2464         for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2465                 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_PRIO_CNT, i,
2466                                           data, data_index);
2467                 data_index += MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
2468         }
2469
2470         /* Per-TC Counters */
2471         for (i = 0; i < TC_MAX_QUEUE; i++) {
2472                 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_TC_CNT, i,
2473                                           data, data_index);
2474                 data_index += MLXSW_SP_PORT_HW_TC_STATS_LEN;
2475         }
2476 }
2477
2478 static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset)
2479 {
2480         switch (sset) {
2481         case ETH_SS_STATS:
2482                 return MLXSW_SP_PORT_ETHTOOL_STATS_LEN;
2483         default:
2484                 return -EOPNOTSUPP;
2485         }
2486 }
2487
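/* Spectrum-1 link mode translation table. Each entry ties one or more PTYS
 * Ethernet protocol bits to an ethtool link mode bit and a nominal speed.
 * The table is walked in both directions: to report supported/advertised
 * modes to ethtool and to translate an ethtool request back into a PTYS
 * mask.
 */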
2488 struct mlxsw_sp1_port_link_mode {
2489         enum ethtool_link_mode_bit_indices mask_ethtool;
2490         u32 mask;
2491         u32 speed;
2492 };
2493
2494 static const struct mlxsw_sp1_port_link_mode mlxsw_sp1_port_link_mode[] = {
2495         {
2496                 .mask           = MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
2497                 .mask_ethtool   = ETHTOOL_LINK_MODE_100baseT_Full_BIT,
2498                 .speed          = SPEED_100,
2499         },
2500         {
2501                 .mask           = MLXSW_REG_PTYS_ETH_SPEED_SGMII |
2502                                   MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
2503                 .mask_ethtool   = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
2504                 .speed          = SPEED_1000,
2505         },
2506         {
2507                 .mask           = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
2508                 .mask_ethtool   = ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
2509                 .speed          = SPEED_10000,
2510         },
2511         {
2512                 .mask           = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
2513                                   MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
2514                 .mask_ethtool   = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
2515                 .speed          = SPEED_10000,
2516         },
2517         {
2518                 .mask           = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
2519                                   MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
2520                                   MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
2521                                   MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
2522                 .mask_ethtool   = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
2523                 .speed          = SPEED_10000,
2524         },
2525         {
2526                 .mask           = MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
2527                 .mask_ethtool   = ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT,
2528                 .speed          = SPEED_20000,
2529         },
2530         {
2531                 .mask           = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
2532                 .mask_ethtool   = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
2533                 .speed          = SPEED_40000,
2534         },
2535         {
2536                 .mask           = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
2537                 .mask_ethtool   = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
2538                 .speed          = SPEED_40000,
2539         },
2540         {
2541                 .mask           = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
2542                 .mask_ethtool   = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
2543                 .speed          = SPEED_40000,
2544         },
2545         {
2546                 .mask           = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
2547                 .mask_ethtool   = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
2548                 .speed          = SPEED_40000,
2549         },
2550         {
2551                 .mask           = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR,
2552                 .mask_ethtool   = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
2553                 .speed          = SPEED_25000,
2554         },
2555         {
2556                 .mask           = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR,
2557                 .mask_ethtool   = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
2558                 .speed          = SPEED_25000,
2559         },
2560         {
2561                 .mask           = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
2562                 .mask_ethtool   = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
2563                 .speed          = SPEED_25000,
2564         },
2565         {
2566                 .mask           = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2,
2567                 .mask_ethtool   = ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
2568                 .speed          = SPEED_50000,
2569         },
2570         {
2571                 .mask           = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
2572                 .mask_ethtool   = ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
2573                 .speed          = SPEED_50000,
2574         },
2575         {
2576                 .mask           = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_SR2,
2577                 .mask_ethtool   = ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
2578                 .speed          = SPEED_50000,
2579         },
2580         {
2581                 .mask           = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
2582                 .mask_ethtool   = ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT,
2583                 .speed          = SPEED_56000,
2584         },
2585         {
2586                 .mask           = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
2587                 .mask_ethtool   = ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT,
2588                 .speed          = SPEED_56000,
2589         },
2590         {
2591                 .mask           = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
2592                 .mask_ethtool   = ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT,
2593                 .speed          = SPEED_56000,
2594         },
2595         {
2596                 .mask           = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
2597                 .mask_ethtool   = ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT,
2598                 .speed          = SPEED_56000,
2599         },
2600         {
2601                 .mask           = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4,
2602                 .mask_ethtool   = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
2603                 .speed          = SPEED_100000,
2604         },
2605         {
2606                 .mask           = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4,
2607                 .mask_ethtool   = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
2608                 .speed          = SPEED_100000,
2609         },
2610         {
2611                 .mask           = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4,
2612                 .mask_ethtool   = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
2613                 .speed          = SPEED_100000,
2614         },
2615         {
2616                 .mask           = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
2617                 .mask_ethtool   = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
2618                 .speed          = SPEED_100000,
2619         },
2620 };
2621
2622 #define MLXSW_SP1_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp1_port_link_mode)
2623
2624 static void
2625 mlxsw_sp1_from_ptys_supported_port(struct mlxsw_sp *mlxsw_sp,
2626                                    u32 ptys_eth_proto,
2627                                    struct ethtool_link_ksettings *cmd)
2628 {
2629         if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
2630                               MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
2631                               MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
2632                               MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
2633                               MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
2634                               MLXSW_REG_PTYS_ETH_SPEED_SGMII))
2635                 ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
2636
2637         if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
2638                               MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
2639                               MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
2640                               MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
2641                               MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
2642                 ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane);
2643 }
2644
2645 static void
2646 mlxsw_sp1_from_ptys_link(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto,
2647                          unsigned long *mode)
2648 {
2649         int i;
2650
2651         for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) {
2652                 if (ptys_eth_proto & mlxsw_sp1_port_link_mode[i].mask)
2653                         __set_bit(mlxsw_sp1_port_link_mode[i].mask_ethtool,
2654                                   mode);
2655         }
2656 }
2657
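/* Translate the operational PTYS protocol mask into the speed/duplex pair
 * reported to ethtool. When the carrier is down both are reported as
 * unknown; otherwise the first matching table entry wins and duplex is
 * always full.
 */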
2658 static void
2659 mlxsw_sp1_from_ptys_speed_duplex(struct mlxsw_sp *mlxsw_sp, bool carrier_ok,
2660                                  u32 ptys_eth_proto,
2661                                  struct ethtool_link_ksettings *cmd)
2662 {
2663         u32 speed = SPEED_UNKNOWN;
2664         u8 duplex = DUPLEX_UNKNOWN;
2665         int i;
2666
2667         if (!carrier_ok)
2668                 goto out;
2669
2670         for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) {
2671                 if (ptys_eth_proto & mlxsw_sp1_port_link_mode[i].mask) {
2672                         speed = mlxsw_sp1_port_link_mode[i].speed;
2673                         duplex = DUPLEX_FULL;
2674                         break;
2675                 }
2676         }
2677 out:
2678         cmd->base.speed = speed;
2679         cmd->base.duplex = duplex;
2680 }
2681
2682 static u32
2683 mlxsw_sp1_to_ptys_advert_link(struct mlxsw_sp *mlxsw_sp,
2684                               const struct ethtool_link_ksettings *cmd)
2685 {
2686         u32 ptys_proto = 0;
2687         int i;
2688
2689         for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) {
2690                 if (test_bit(mlxsw_sp1_port_link_mode[i].mask_ethtool,
2691                              cmd->link_modes.advertising))
2692                         ptys_proto |= mlxsw_sp1_port_link_mode[i].mask;
2693         }
2694         return ptys_proto;
2695 }
2696
2697 static u32 mlxsw_sp1_to_ptys_speed(struct mlxsw_sp *mlxsw_sp, u32 speed)
2698 {
2699         u32 ptys_proto = 0;
2700         int i;
2701
2702         for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) {
2703                 if (speed == mlxsw_sp1_port_link_mode[i].speed)
2704                         ptys_proto |= mlxsw_sp1_port_link_mode[i].mask;
2705         }
2706         return ptys_proto;
2707 }
2708
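/* Build a PTYS mask of every Spectrum-1 link mode whose nominal speed does
 * not exceed upper_speed. Unlike mlxsw_sp1_to_ptys_speed() above, modes are
 * accumulated rather than matched exactly.
 */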
2709 static u32
2710 mlxsw_sp1_to_ptys_upper_speed(struct mlxsw_sp *mlxsw_sp, u32 upper_speed)
2711 {
2712         u32 ptys_proto = 0;
2713         int i;
2714
2715         for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) {
2716                 if (mlxsw_sp1_port_link_mode[i].speed <= upper_speed)
2717                         ptys_proto |= mlxsw_sp1_port_link_mode[i].mask;
2718         }
2719         return ptys_proto;
2720 }
2721
2722 static int
2723 mlxsw_sp1_port_speed_base(struct mlxsw_sp *mlxsw_sp, u8 local_port,
2724                           u32 *base_speed)
2725 {
2726         *base_speed = MLXSW_SP_PORT_BASE_SPEED_25G;
2727         return 0;
2728 }
2729
2730 static void
2731 mlxsw_sp1_reg_ptys_eth_pack(struct mlxsw_sp *mlxsw_sp, char *payload,
2732                             u8 local_port, u32 proto_admin, bool autoneg)
2733 {
2734         mlxsw_reg_ptys_eth_pack(payload, local_port, proto_admin, autoneg);
2735 }
2736
2737 static void
2738 mlxsw_sp1_reg_ptys_eth_unpack(struct mlxsw_sp *mlxsw_sp, char *payload,
2739                               u32 *p_eth_proto_cap, u32 *p_eth_proto_admin,
2740                               u32 *p_eth_proto_oper)
2741 {
2742         mlxsw_reg_ptys_eth_unpack(payload, p_eth_proto_cap, p_eth_proto_admin,
2743                                   p_eth_proto_oper);
2744 }
2745
2746 static const struct mlxsw_sp_port_type_speed_ops
2747 mlxsw_sp1_port_type_speed_ops = {
2748         .from_ptys_supported_port       = mlxsw_sp1_from_ptys_supported_port,
2749         .from_ptys_link                 = mlxsw_sp1_from_ptys_link,
2750         .from_ptys_speed_duplex         = mlxsw_sp1_from_ptys_speed_duplex,
2751         .to_ptys_advert_link            = mlxsw_sp1_to_ptys_advert_link,
2752         .to_ptys_speed                  = mlxsw_sp1_to_ptys_speed,
2753         .to_ptys_upper_speed            = mlxsw_sp1_to_ptys_upper_speed,
2754         .port_speed_base                = mlxsw_sp1_port_speed_base,
2755         .reg_ptys_eth_pack              = mlxsw_sp1_reg_ptys_eth_pack,
2756         .reg_ptys_eth_unpack            = mlxsw_sp1_reg_ptys_eth_unpack,
2757 };
2758
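/* On Spectrum-2 the extended PTYS protocol fields each cover a whole family
 * of ethtool link modes, so the translation tables below are arrays of link
 * mode bits per protocol field rather than the one-to-one entries used for
 * Spectrum-1.
 */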
2759 static const enum ethtool_link_mode_bit_indices
2760 mlxsw_sp2_mask_ethtool_sgmii_100m[] = {
2761         ETHTOOL_LINK_MODE_100baseT_Full_BIT,
2762 };
2763
2764 #define MLXSW_SP2_MASK_ETHTOOL_SGMII_100M_LEN \
2765         ARRAY_SIZE(mlxsw_sp2_mask_ethtool_sgmii_100m)
2766
2767 static const enum ethtool_link_mode_bit_indices
2768 mlxsw_sp2_mask_ethtool_1000base_x_sgmii[] = {
2769         ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
2770         ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
2771 };
2772
2773 #define MLXSW_SP2_MASK_ETHTOOL_1000BASE_X_SGMII_LEN \
2774         ARRAY_SIZE(mlxsw_sp2_mask_ethtool_1000base_x_sgmii)
2775
2776 static const enum ethtool_link_mode_bit_indices
2777 mlxsw_sp2_mask_ethtool_2_5gbase_x_2_5gmii[] = {
2778         ETHTOOL_LINK_MODE_2500baseX_Full_BIT,
2779 };
2780
2781 #define MLXSW_SP2_MASK_ETHTOOL_2_5GBASE_X_2_5GMII_LEN \
2782         ARRAY_SIZE(mlxsw_sp2_mask_ethtool_2_5gbase_x_2_5gmii)
2783
2784 static const enum ethtool_link_mode_bit_indices
2785 mlxsw_sp2_mask_ethtool_5gbase_r[] = {
2786         ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
2787 };
2788
2789 #define MLXSW_SP2_MASK_ETHTOOL_5GBASE_R_LEN \
2790         ARRAY_SIZE(mlxsw_sp2_mask_ethtool_5gbase_r)
2791
2792 static const enum ethtool_link_mode_bit_indices
2793 mlxsw_sp2_mask_ethtool_xfi_xaui_1_10g[] = {
2794         ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
2795         ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
2796         ETHTOOL_LINK_MODE_10000baseR_FEC_BIT,
2797         ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
2798         ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
2799         ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
2800         ETHTOOL_LINK_MODE_10000baseER_Full_BIT,
2801 };
2802
2803 #define MLXSW_SP2_MASK_ETHTOOL_XFI_XAUI_1_10G_LEN \
2804         ARRAY_SIZE(mlxsw_sp2_mask_ethtool_xfi_xaui_1_10g)
2805
2806 static const enum ethtool_link_mode_bit_indices
2807 mlxsw_sp2_mask_ethtool_xlaui_4_xlppi_4_40g[] = {
2808         ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
2809         ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
2810         ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
2811         ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
2812 };
2813
2814 #define MLXSW_SP2_MASK_ETHTOOL_XLAUI_4_XLPPI_4_40G_LEN \
2815         ARRAY_SIZE(mlxsw_sp2_mask_ethtool_xlaui_4_xlppi_4_40g)
2816
2817 static const enum ethtool_link_mode_bit_indices
2818 mlxsw_sp2_mask_ethtool_25gaui_1_25gbase_cr_kr[] = {
2819         ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
2820         ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
2821         ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
2822 };
2823
2824 #define MLXSW_SP2_MASK_ETHTOOL_25GAUI_1_25GBASE_CR_KR_LEN \
2825         ARRAY_SIZE(mlxsw_sp2_mask_ethtool_25gaui_1_25gbase_cr_kr)
2826
2827 static const enum ethtool_link_mode_bit_indices
2828 mlxsw_sp2_mask_ethtool_50gaui_2_laui_2_50gbase_cr2_kr2[] = {
2829         ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
2830         ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
2831         ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
2832 };
2833
2834 #define MLXSW_SP2_MASK_ETHTOOL_50GAUI_2_LAUI_2_50GBASE_CR2_KR2_LEN \
2835         ARRAY_SIZE(mlxsw_sp2_mask_ethtool_50gaui_2_laui_2_50gbase_cr2_kr2)
2836
2837 static const enum ethtool_link_mode_bit_indices
2838 mlxsw_sp2_mask_ethtool_50gaui_1_laui_1_50gbase_cr_kr[] = {
2839         ETHTOOL_LINK_MODE_50000baseKR_Full_BIT,
2840         ETHTOOL_LINK_MODE_50000baseSR_Full_BIT,
2841         ETHTOOL_LINK_MODE_50000baseCR_Full_BIT,
2842         ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
2843         ETHTOOL_LINK_MODE_50000baseDR_Full_BIT,
2844 };
2845
2846 #define MLXSW_SP2_MASK_ETHTOOL_50GAUI_1_LAUI_1_50GBASE_CR_KR_LEN \
2847         ARRAY_SIZE(mlxsw_sp2_mask_ethtool_50gaui_1_laui_1_50gbase_cr_kr)
2848
2849 static const enum ethtool_link_mode_bit_indices
2850 mlxsw_sp2_mask_ethtool_caui_4_100gbase_cr4_kr4[] = {
2851         ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
2852         ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
2853         ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
2854         ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
2855 };
2856
2857 #define MLXSW_SP2_MASK_ETHTOOL_CAUI_4_100GBASE_CR4_KR4_LEN \
2858         ARRAY_SIZE(mlxsw_sp2_mask_ethtool_caui_4_100gbase_cr4_kr4)
2859
2860 static const enum ethtool_link_mode_bit_indices
2861 mlxsw_sp2_mask_ethtool_100gaui_2_100gbase_cr2_kr2[] = {
2862         ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT,
2863         ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT,
2864         ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT,
2865         ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT,
2866         ETHTOOL_LINK_MODE_100000baseDR2_Full_BIT,
2867 };
2868
2869 #define MLXSW_SP2_MASK_ETHTOOL_100GAUI_2_100GBASE_CR2_KR2_LEN \
2870         ARRAY_SIZE(mlxsw_sp2_mask_ethtool_100gaui_2_100gbase_cr2_kr2)
2871
2872 static const enum ethtool_link_mode_bit_indices
2873 mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4[] = {
2874         ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
2875         ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
2876         ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
2877         ETHTOOL_LINK_MODE_200000baseDR4_Full_BIT,
2878         ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
2879 };
2880
2881 #define MLXSW_SP2_MASK_ETHTOOL_200GAUI_4_200GBASE_CR4_KR4_LEN \
2882         ARRAY_SIZE(mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4)
2883
2884 struct mlxsw_sp2_port_link_mode {
2885         const enum ethtool_link_mode_bit_indices *mask_ethtool;
2886         int m_ethtool_len;
2887         u32 mask;
2888         u32 speed;
2889 };
2890
2891 static const struct mlxsw_sp2_port_link_mode mlxsw_sp2_port_link_mode[] = {
2892         {
2893                 .mask           = MLXSW_REG_PTYS_EXT_ETH_SPEED_SGMII_100M,
2894                 .mask_ethtool   = mlxsw_sp2_mask_ethtool_sgmii_100m,
2895                 .m_ethtool_len  = MLXSW_SP2_MASK_ETHTOOL_SGMII_100M_LEN,
2896                 .speed          = SPEED_100,
2897         },
2898         {
2899                 .mask           = MLXSW_REG_PTYS_EXT_ETH_SPEED_1000BASE_X_SGMII,
2900                 .mask_ethtool   = mlxsw_sp2_mask_ethtool_1000base_x_sgmii,
2901                 .m_ethtool_len  = MLXSW_SP2_MASK_ETHTOOL_1000BASE_X_SGMII_LEN,
2902                 .speed          = SPEED_1000,
2903         },
2904         {
2905                 .mask           = MLXSW_REG_PTYS_EXT_ETH_SPEED_2_5GBASE_X_2_5GMII,
2906                 .mask_ethtool   = mlxsw_sp2_mask_ethtool_2_5gbase_x_2_5gmii,
2907                 .m_ethtool_len  = MLXSW_SP2_MASK_ETHTOOL_2_5GBASE_X_2_5GMII_LEN,
2908                 .speed          = SPEED_2500,
2909         },
2910         {
2911                 .mask           = MLXSW_REG_PTYS_EXT_ETH_SPEED_5GBASE_R,
2912                 .mask_ethtool   = mlxsw_sp2_mask_ethtool_5gbase_r,
2913                 .m_ethtool_len  = MLXSW_SP2_MASK_ETHTOOL_5GBASE_R_LEN,
2914                 .speed          = SPEED_5000,
2915         },
2916         {
2917                 .mask           = MLXSW_REG_PTYS_EXT_ETH_SPEED_XFI_XAUI_1_10G,
2918                 .mask_ethtool   = mlxsw_sp2_mask_ethtool_xfi_xaui_1_10g,
2919                 .m_ethtool_len  = MLXSW_SP2_MASK_ETHTOOL_XFI_XAUI_1_10G_LEN,
2920                 .speed          = SPEED_10000,
2921         },
2922         {
2923                 .mask           = MLXSW_REG_PTYS_EXT_ETH_SPEED_XLAUI_4_XLPPI_4_40G,
2924                 .mask_ethtool   = mlxsw_sp2_mask_ethtool_xlaui_4_xlppi_4_40g,
2925                 .m_ethtool_len  = MLXSW_SP2_MASK_ETHTOOL_XLAUI_4_XLPPI_4_40G_LEN,
2926                 .speed          = SPEED_40000,
2927         },
2928         {
2929                 .mask           = MLXSW_REG_PTYS_EXT_ETH_SPEED_25GAUI_1_25GBASE_CR_KR,
2930                 .mask_ethtool   = mlxsw_sp2_mask_ethtool_25gaui_1_25gbase_cr_kr,
2931                 .m_ethtool_len  = MLXSW_SP2_MASK_ETHTOOL_25GAUI_1_25GBASE_CR_KR_LEN,
2932                 .speed          = SPEED_25000,
2933         },
2934         {
2935                 .mask           = MLXSW_REG_PTYS_EXT_ETH_SPEED_50GAUI_2_LAUI_2_50GBASE_CR2_KR2,
2936                 .mask_ethtool   = mlxsw_sp2_mask_ethtool_50gaui_2_laui_2_50gbase_cr2_kr2,
2937                 .m_ethtool_len  = MLXSW_SP2_MASK_ETHTOOL_50GAUI_2_LAUI_2_50GBASE_CR2_KR2_LEN,
2938                 .speed          = SPEED_50000,
2939         },
2940         {
2941                 .mask           = MLXSW_REG_PTYS_EXT_ETH_SPEED_50GAUI_1_LAUI_1_50GBASE_CR_KR,
2942                 .mask_ethtool   = mlxsw_sp2_mask_ethtool_50gaui_1_laui_1_50gbase_cr_kr,
2943                 .m_ethtool_len  = MLXSW_SP2_MASK_ETHTOOL_50GAUI_1_LAUI_1_50GBASE_CR_KR_LEN,
2944                 .speed          = SPEED_50000,
2945         },
2946         {
2947                 .mask           = MLXSW_REG_PTYS_EXT_ETH_SPEED_CAUI_4_100GBASE_CR4_KR4,
2948                 .mask_ethtool   = mlxsw_sp2_mask_ethtool_caui_4_100gbase_cr4_kr4,
2949                 .m_ethtool_len  = MLXSW_SP2_MASK_ETHTOOL_CAUI_4_100GBASE_CR4_KR4_LEN,
2950                 .speed          = SPEED_100000,
2951         },
2952         {
2953                 .mask           = MLXSW_REG_PTYS_EXT_ETH_SPEED_100GAUI_2_100GBASE_CR2_KR2,
2954                 .mask_ethtool   = mlxsw_sp2_mask_ethtool_100gaui_2_100gbase_cr2_kr2,
2955                 .m_ethtool_len  = MLXSW_SP2_MASK_ETHTOOL_100GAUI_2_100GBASE_CR2_KR2_LEN,
2956                 .speed          = SPEED_100000,
2957         },
2958         {
2959                 .mask           = MLXSW_REG_PTYS_EXT_ETH_SPEED_200GAUI_4_200GBASE_CR4_KR4,
2960                 .mask_ethtool   = mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4,
2961                 .m_ethtool_len  = MLXSW_SP2_MASK_ETHTOOL_200GAUI_4_200GBASE_CR4_KR4_LEN,
2962                 .speed          = SPEED_200000,
2963         },
2964 };
2965
2966 #define MLXSW_SP2_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp2_port_link_mode)
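
/* A worked example of how the table above is consumed: if the firmware
 * reports an operational PTYS mask in which only
 * MLXSW_REG_PTYS_EXT_ETH_SPEED_25GAUI_1_25GBASE_CR_KR is set, then
 * mlxsw_sp2_from_ptys_link() marks 25000baseCR_Full, 25000baseKR_Full and
 * 25000baseSR_Full in the ethtool link-mode bitmap, and
 * mlxsw_sp2_from_ptys_speed_duplex() reports SPEED_25000 / DUPLEX_FULL.
 */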
2967
2968 static void
2969 mlxsw_sp2_from_ptys_supported_port(struct mlxsw_sp *mlxsw_sp,
2970                                    u32 ptys_eth_proto,
2971                                    struct ethtool_link_ksettings *cmd)
2972 {
2973         ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
2974         ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane);
2975 }
2976
2977 static void
2978 mlxsw_sp2_set_bit_ethtool(const struct mlxsw_sp2_port_link_mode *link_mode,
2979                           unsigned long *mode)
2980 {
2981         int i;
2982
2983         for (i = 0; i < link_mode->m_ethtool_len; i++)
2984                 __set_bit(link_mode->mask_ethtool[i], mode);
2985 }
2986
2987 static void
2988 mlxsw_sp2_from_ptys_link(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto,
2989                          unsigned long *mode)
2990 {
2991         int i;
2992
2993         for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) {
2994                 if (ptys_eth_proto & mlxsw_sp2_port_link_mode[i].mask)
2995                         mlxsw_sp2_set_bit_ethtool(&mlxsw_sp2_port_link_mode[i],
2996                                                   mode);
2997         }
2998 }
2999
3000 static void
3001 mlxsw_sp2_from_ptys_speed_duplex(struct mlxsw_sp *mlxsw_sp, bool carrier_ok,
3002                                  u32 ptys_eth_proto,
3003                                  struct ethtool_link_ksettings *cmd)
3004 {
3005         u32 speed = SPEED_UNKNOWN;
3006         u8 duplex = DUPLEX_UNKNOWN;
3007         int i;
3008
3009         if (!carrier_ok)
3010                 goto out;
3011
3012         for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) {
3013                 if (ptys_eth_proto & mlxsw_sp2_port_link_mode[i].mask) {
3014                         speed = mlxsw_sp2_port_link_mode[i].speed;
3015                         duplex = DUPLEX_FULL;
3016                         break;
3017                 }
3018         }
3019 out:
3020         cmd->base.speed = speed;
3021         cmd->base.duplex = duplex;
3022 }
3023
3024 static bool
3025 mlxsw_sp2_test_bit_ethtool(const struct mlxsw_sp2_port_link_mode *link_mode,
3026                            const unsigned long *mode)
3027 {
3028         int cnt = 0;
3029         int i;
3030
3031         for (i = 0; i < link_mode->m_ethtool_len; i++) {
3032                 if (test_bit(link_mode->mask_ethtool[i], mode))
3033                         cnt++;
3034         }
3035
3036         return cnt == link_mode->m_ethtool_len;
3037 }
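
/* Unlike Spectrum-1, where each link mode maps to a single ethtool bit, a
 * Spectrum-2 speed group is selected only when every ethtool mode belonging
 * to it is requested. For example, advertising only 50000baseCR_Full does
 * not enable the 50GAUI_1 group; all five modes listed in
 * mlxsw_sp2_mask_ethtool_50gaui_1_laui_1_50gbase_cr_kr must be set for
 * mlxsw_sp2_to_ptys_advert_link() below to include that PTYS bit.
 */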
3038
3039 static u32
3040 mlxsw_sp2_to_ptys_advert_link(struct mlxsw_sp *mlxsw_sp,
3041                               const struct ethtool_link_ksettings *cmd)
3042 {
3043         u32 ptys_proto = 0;
3044         int i;
3045
3046         for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) {
3047                 if (mlxsw_sp2_test_bit_ethtool(&mlxsw_sp2_port_link_mode[i],
3048                                                cmd->link_modes.advertising))
3049                         ptys_proto |= mlxsw_sp2_port_link_mode[i].mask;
3050         }
3051         return ptys_proto;
3052 }
3053
3054 static u32 mlxsw_sp2_to_ptys_speed(struct mlxsw_sp *mlxsw_sp, u32 speed)
3055 {
3056         u32 ptys_proto = 0;
3057         int i;
3058
3059         for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) {
3060                 if (speed == mlxsw_sp2_port_link_mode[i].speed)
3061                         ptys_proto |= mlxsw_sp2_port_link_mode[i].mask;
3062         }
3063         return ptys_proto;
3064 }
3065
3066 static u32
3067 mlxsw_sp2_to_ptys_upper_speed(struct mlxsw_sp *mlxsw_sp, u32 upper_speed)
3068 {
3069         u32 ptys_proto = 0;
3070         int i;
3071
3072         for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) {
3073                 if (mlxsw_sp2_port_link_mode[i].speed <= upper_speed)
3074                         ptys_proto |= mlxsw_sp2_port_link_mode[i].mask;
3075         }
3076         return ptys_proto;
3077 }
3078
3079 static int
3080 mlxsw_sp2_port_speed_base(struct mlxsw_sp *mlxsw_sp, u8 local_port,
3081                           u32 *base_speed)
3082 {
3083         char ptys_pl[MLXSW_REG_PTYS_LEN];
3084         u32 eth_proto_cap;
3085         int err;
3086
3087         /* In Spectrum-2, the speed of a single lane (1x) can change from port
3088          * to port, so query it from the firmware.
3089          */
3090         mlxsw_reg_ptys_ext_eth_pack(ptys_pl, local_port, 0, false);
3091         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
3092         if (err)
3093                 return err;
3094         mlxsw_reg_ptys_ext_eth_unpack(ptys_pl, &eth_proto_cap, NULL, NULL);
3095
3096         if (eth_proto_cap &
3097             MLXSW_REG_PTYS_EXT_ETH_SPEED_50GAUI_1_LAUI_1_50GBASE_CR_KR) {
3098                 *base_speed = MLXSW_SP_PORT_BASE_SPEED_50G;
3099                 return 0;
3100         }
3101
3102         if (eth_proto_cap &
3103             MLXSW_REG_PTYS_EXT_ETH_SPEED_25GAUI_1_25GBASE_CR_KR) {
3104                 *base_speed = MLXSW_SP_PORT_BASE_SPEED_25G;
3105                 return 0;
3106         }
3107
3108         return -EIO;
3109 }
3110
3111 static void
3112 mlxsw_sp2_reg_ptys_eth_pack(struct mlxsw_sp *mlxsw_sp, char *payload,
3113                             u8 local_port, u32 proto_admin,
3114                             bool autoneg)
3115 {
3116         mlxsw_reg_ptys_ext_eth_pack(payload, local_port, proto_admin, autoneg);
3117 }
3118
3119 static void
3120 mlxsw_sp2_reg_ptys_eth_unpack(struct mlxsw_sp *mlxsw_sp, char *payload,
3121                               u32 *p_eth_proto_cap, u32 *p_eth_proto_admin,
3122                               u32 *p_eth_proto_oper)
3123 {
3124         mlxsw_reg_ptys_ext_eth_unpack(payload, p_eth_proto_cap,
3125                                       p_eth_proto_admin, p_eth_proto_oper);
3126 }
3127
3128 static const struct mlxsw_sp_port_type_speed_ops
3129 mlxsw_sp2_port_type_speed_ops = {
3130         .from_ptys_supported_port       = mlxsw_sp2_from_ptys_supported_port,
3131         .from_ptys_link                 = mlxsw_sp2_from_ptys_link,
3132         .from_ptys_speed_duplex         = mlxsw_sp2_from_ptys_speed_duplex,
3133         .to_ptys_advert_link            = mlxsw_sp2_to_ptys_advert_link,
3134         .to_ptys_speed                  = mlxsw_sp2_to_ptys_speed,
3135         .to_ptys_upper_speed            = mlxsw_sp2_to_ptys_upper_speed,
3136         .port_speed_base                = mlxsw_sp2_port_speed_base,
3137         .reg_ptys_eth_pack              = mlxsw_sp2_reg_ptys_eth_pack,
3138         .reg_ptys_eth_unpack            = mlxsw_sp2_reg_ptys_eth_unpack,
3139 };
3140
3141 static void
3142 mlxsw_sp_port_get_link_supported(struct mlxsw_sp *mlxsw_sp, u32 eth_proto_cap,
3143                                  struct ethtool_link_ksettings *cmd)
3144 {
3145         const struct mlxsw_sp_port_type_speed_ops *ops;
3146
3147         ops = mlxsw_sp->port_type_speed_ops;
3148
3149         ethtool_link_ksettings_add_link_mode(cmd, supported, Asym_Pause);
3150         ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
3151         ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
3152
3153         ops->from_ptys_supported_port(mlxsw_sp, eth_proto_cap, cmd);
3154         ops->from_ptys_link(mlxsw_sp, eth_proto_cap, cmd->link_modes.supported);
3155 }
3156
3157 static void
3158 mlxsw_sp_port_get_link_advertise(struct mlxsw_sp *mlxsw_sp,
3159                                  u32 eth_proto_admin, bool autoneg,
3160                                  struct ethtool_link_ksettings *cmd)
3161 {
3162         const struct mlxsw_sp_port_type_speed_ops *ops;
3163
3164         ops = mlxsw_sp->port_type_speed_ops;
3165
3166         if (!autoneg)
3167                 return;
3168
3169         ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);
3170         ops->from_ptys_link(mlxsw_sp, eth_proto_admin,
3171                             cmd->link_modes.advertising);
3172 }
3173
3174 static u8
3175 mlxsw_sp_port_connector_port(enum mlxsw_reg_ptys_connector_type connector_type)
3176 {
3177         switch (connector_type) {
3178         case MLXSW_REG_PTYS_CONNECTOR_TYPE_UNKNOWN_OR_NO_CONNECTOR:
3179                 return PORT_OTHER;
3180         case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_NONE:
3181                 return PORT_NONE;
3182         case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_TP:
3183                 return PORT_TP;
3184         case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_AUI:
3185                 return PORT_AUI;
3186         case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_BNC:
3187                 return PORT_BNC;
3188         case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_MII:
3189                 return PORT_MII;
3190         case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_FIBRE:
3191                 return PORT_FIBRE;
3192         case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_DA:
3193                 return PORT_DA;
3194         case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_OTHER:
3195                 return PORT_OTHER;
3196         default:
3197                 WARN_ON_ONCE(1);
3198                 return PORT_OTHER;
3199         }
3200 }
3201
3202 static int mlxsw_sp_port_get_link_ksettings(struct net_device *dev,
3203                                             struct ethtool_link_ksettings *cmd)
3204 {
3205         u32 eth_proto_cap, eth_proto_admin, eth_proto_oper;
3206         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
3207         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3208         const struct mlxsw_sp_port_type_speed_ops *ops;
3209         char ptys_pl[MLXSW_REG_PTYS_LEN];
3210         u8 connector_type;
3211         bool autoneg;
3212         int err;
3213
3214         ops = mlxsw_sp->port_type_speed_ops;
3215
3216         autoneg = mlxsw_sp_port->link.autoneg;
3217         ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
3218                                0, false);
3219         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
3220         if (err)
3221                 return err;
3222         ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, &eth_proto_cap,
3223                                  &eth_proto_admin, &eth_proto_oper);
3224
3225         mlxsw_sp_port_get_link_supported(mlxsw_sp, eth_proto_cap, cmd);
3226
3227         mlxsw_sp_port_get_link_advertise(mlxsw_sp, eth_proto_admin, autoneg,
3228                                          cmd);
3229
3230         cmd->base.autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
3231         connector_type = mlxsw_reg_ptys_connector_type_get(ptys_pl);
3232         cmd->base.port = mlxsw_sp_port_connector_port(connector_type);
3233         ops->from_ptys_speed_duplex(mlxsw_sp, netif_carrier_ok(dev),
3234                                     eth_proto_oper, cmd);
3235
3236         return 0;
3237 }
3238
3239 static int
3240 mlxsw_sp_port_set_link_ksettings(struct net_device *dev,
3241                                  const struct ethtool_link_ksettings *cmd)
3242 {
3243         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
3244         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3245         const struct mlxsw_sp_port_type_speed_ops *ops;
3246         char ptys_pl[MLXSW_REG_PTYS_LEN];
3247         u32 eth_proto_cap, eth_proto_new;
3248         bool autoneg;
3249         int err;
3250
3251         ops = mlxsw_sp->port_type_speed_ops;
3252
3253         ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
3254                                0, false);
3255         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
3256         if (err)
3257                 return err;
3258         ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, &eth_proto_cap, NULL, NULL);
3259
3260         autoneg = cmd->base.autoneg == AUTONEG_ENABLE;
3261         if (!autoneg && cmd->base.speed == SPEED_56000) {
3262                 netdev_err(dev, "56G not supported with autoneg off\n");
3263                 return -EINVAL;
3264         }
3265         eth_proto_new = autoneg ?
3266                 ops->to_ptys_advert_link(mlxsw_sp, cmd) :
3267                 ops->to_ptys_speed(mlxsw_sp, cmd->base.speed);
3268
3269         eth_proto_new = eth_proto_new & eth_proto_cap;
3270         if (!eth_proto_new) {
3271                 netdev_err(dev, "No supported speed requested\n");
3272                 return -EINVAL;
3273         }
3274
3275         ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
3276                                eth_proto_new, autoneg);
3277         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
3278         if (err)
3279                 return err;
3280
3281         mlxsw_sp_port->link.autoneg = autoneg;
3282
3283         if (!netif_running(dev))
3284                 return 0;
3285
3286         mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
3287         mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
3288
3289         return 0;
3290 }
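
/* Both handlers above are reached through the standard ethtool interface,
 * e.g. (the interface name is only an example):
 *
 *   ethtool swp1                              # get_link_ksettings
 *   ethtool -s swp1 autoneg off speed 100000  # set_link_ksettings
 *
 * With autoneg off the requested speed is converted via ops->to_ptys_speed()
 * and must intersect eth_proto_cap; with autoneg on the advertised link-mode
 * bitmap is converted via ops->to_ptys_advert_link() instead.
 */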
3291
3292 static int mlxsw_sp_get_module_info(struct net_device *netdev,
3293                                     struct ethtool_modinfo *modinfo)
3294 {
3295         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev);
3296         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3297         int err;
3298
3299         err = mlxsw_env_get_module_info(mlxsw_sp->core,
3300                                         mlxsw_sp_port->mapping.module,
3301                                         modinfo);
3302
3303         return err;
3304 }
3305
3306 static int mlxsw_sp_get_module_eeprom(struct net_device *netdev,
3307                                       struct ethtool_eeprom *ee,
3308                                       u8 *data)
3309 {
3310         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev);
3311         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3312         int err;
3313
3314         err = mlxsw_env_get_module_eeprom(netdev, mlxsw_sp->core,
3315                                           mlxsw_sp_port->mapping.module, ee,
3316                                           data);
3317
3318         return err;
3319 }
3320
3321 static int
3322 mlxsw_sp_get_ts_info(struct net_device *netdev, struct ethtool_ts_info *info)
3323 {
3324         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev);
3325         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3326
3327         return mlxsw_sp->ptp_ops->get_ts_info(mlxsw_sp, info);
3328 }
3329
3330 static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
3331         .get_drvinfo            = mlxsw_sp_port_get_drvinfo,
3332         .get_link               = ethtool_op_get_link,
3333         .get_pauseparam         = mlxsw_sp_port_get_pauseparam,
3334         .set_pauseparam         = mlxsw_sp_port_set_pauseparam,
3335         .get_strings            = mlxsw_sp_port_get_strings,
3336         .set_phys_id            = mlxsw_sp_port_set_phys_id,
3337         .get_ethtool_stats      = mlxsw_sp_port_get_stats,
3338         .get_sset_count         = mlxsw_sp_port_get_sset_count,
3339         .get_link_ksettings     = mlxsw_sp_port_get_link_ksettings,
3340         .set_link_ksettings     = mlxsw_sp_port_set_link_ksettings,
3341         .get_module_info        = mlxsw_sp_get_module_info,
3342         .get_module_eeprom      = mlxsw_sp_get_module_eeprom,
3343         .get_ts_info            = mlxsw_sp_get_ts_info,
3344 };
3345
3346 static int
3347 mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width)
3348 {
3349         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3350         const struct mlxsw_sp_port_type_speed_ops *ops;
3351         char ptys_pl[MLXSW_REG_PTYS_LEN];
3352         u32 eth_proto_admin;
3353         u32 upper_speed;
3354         u32 base_speed;
3355         int err;
3356
3357         ops = mlxsw_sp->port_type_speed_ops;
3358
3359         err = ops->port_speed_base(mlxsw_sp, mlxsw_sp_port->local_port,
3360                                    &base_speed);
3361         if (err)
3362                 return err;
3363         upper_speed = base_speed * width;
3364
3365         eth_proto_admin = ops->to_ptys_upper_speed(mlxsw_sp, upper_speed);
3366         ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
3367                                eth_proto_admin, mlxsw_sp_port->link.autoneg);
3368         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
3369 }
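
/* Worked example of the calculation above: on Spectrum-1 the base speed is
 * always 25G, so a 4-lane port gets upper_speed = 100000 and every link mode
 * with a speed of at most 100G is enabled. On Spectrum-2 the base speed is
 * queried per port; a port whose lanes are 50G-capable and which is mapped
 * with width 4 would get upper_speed = 200000.
 */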
3370
3371 int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
3372                           enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
3373                           bool dwrr, u8 dwrr_weight)
3374 {
3375         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3376         char qeec_pl[MLXSW_REG_QEEC_LEN];
3377
3378         mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
3379                             next_index);
3380         mlxsw_reg_qeec_de_set(qeec_pl, true);
3381         mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
3382         mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
3383         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
3384 }
3385
3386 int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
3387                                   enum mlxsw_reg_qeec_hr hr, u8 index,
3388                                   u8 next_index, u32 maxrate)
3389 {
3390         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3391         char qeec_pl[MLXSW_REG_QEEC_LEN];
3392
3393         mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
3394                             next_index);
3395         mlxsw_reg_qeec_mase_set(qeec_pl, true);
3396         mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
3397         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
3398 }
3399
3400 static int mlxsw_sp_port_min_bw_set(struct mlxsw_sp_port *mlxsw_sp_port,
3401                                     enum mlxsw_reg_qeec_hr hr, u8 index,
3402                                     u8 next_index, u32 minrate)
3403 {
3404         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3405         char qeec_pl[MLXSW_REG_QEEC_LEN];
3406
3407         mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
3408                             next_index);
3409         mlxsw_reg_qeec_mise_set(qeec_pl, true);
3410         mlxsw_reg_qeec_min_shaper_rate_set(qeec_pl, minrate);
3411
3412         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
3413 }
3414
3415 int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
3416                               u8 switch_prio, u8 tclass)
3417 {
3418         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3419         char qtct_pl[MLXSW_REG_QTCT_LEN];
3420
3421         mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
3422                             tclass);
3423         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
3424 }
3425
3426 static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
3427 {
3428         int err, i;
3429
3430         /* Set up the elements hierarchy, so that each TC is linked to
3431          * one subgroup, and all subgroups are members of the same group.
3432          */
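        /* Illustration of the hierarchy configured below: a single group
         * element feeds the port, eight subgroups feed the group, and each
         * subgroup i is fed by TC i (unicast) and TC i + 8 (multicast):
         *
         *   port <- group 0 <- subgroup i <- { TC i, TC i + 8 },  i = 0..7
         */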
3433         err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
3434                                     MLXSW_REG_QEEC_HIERARCY_GROUP, 0, 0, false,
3435                                     0);
3436         if (err)
3437                 return err;
3438         for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
3439                 err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
3440                                             MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i,
3441                                             0, false, 0);
3442                 if (err)
3443                         return err;
3444         }
3445         for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
3446                 err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
3447                                             MLXSW_REG_QEEC_HIERARCY_TC, i, i,
3448                                             false, 0);
3449                 if (err)
3450                         return err;
3451
3452                 err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
3453                                             MLXSW_REG_QEEC_HIERARCY_TC,
3454                                             i + 8, i,
3455                                             true, 100);
3456                 if (err)
3457                         return err;
3458         }
3459
3460         /* Make sure the max shaper is disabled in all hierarchies that support
3461          * it. Note that this disables ptps (PTP shaper), but that is intended
3462          * for the initial configuration.
3463          */
3464         err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
3465                                             MLXSW_REG_QEEC_HIERARCY_PORT, 0, 0,
3466                                             MLXSW_REG_QEEC_MAS_DIS);
3467         if (err)
3468                 return err;
3469         for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
3470                 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
3471                                                     MLXSW_REG_QEEC_HIERARCY_SUBGROUP,
3472                                                     i, 0,
3473                                                     MLXSW_REG_QEEC_MAS_DIS);
3474                 if (err)
3475                         return err;
3476         }
3477         for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
3478                 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
3479                                                     MLXSW_REG_QEEC_HIERARCY_TC,
3480                                                     i, i,
3481                                                     MLXSW_REG_QEEC_MAS_DIS);
3482                 if (err)
3483                         return err;
3484
3485                 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
3486                                                     MLXSW_REG_QEEC_HIERARCY_TC,
3487                                                     i + 8, i,
3488                                                     MLXSW_REG_QEEC_MAS_DIS);
3489                 if (err)
3490                         return err;
3491         }
3492
3493         /* Configure the min shaper for multicast TCs. */
3494         for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
3495                 err = mlxsw_sp_port_min_bw_set(mlxsw_sp_port,
3496                                                MLXSW_REG_QEEC_HIERARCY_TC,
3497                                                i + 8, i,
3498                                                MLXSW_REG_QEEC_MIS_MIN);
3499                 if (err)
3500                         return err;
3501         }
3502
3503         /* Map all priorities to traffic class 0. */
3504         for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
3505                 err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
3506                 if (err)
3507                         return err;
3508         }
3509
3510         return 0;
3511 }
3512
3513 static int mlxsw_sp_port_tc_mc_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
3514                                         bool enable)
3515 {
3516         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3517         char qtctm_pl[MLXSW_REG_QTCTM_LEN];
3518
3519         mlxsw_reg_qtctm_pack(qtctm_pl, mlxsw_sp_port->local_port, enable);
3520         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtctm), qtctm_pl);
3521 }
3522
3523 static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
3524                                 bool split, u8 module, u8 width, u8 lane)
3525 {
3526         struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
3527         struct mlxsw_sp_port *mlxsw_sp_port;
3528         struct net_device *dev;
3529         int err;
3530
3531         err = mlxsw_core_port_init(mlxsw_sp->core, local_port,
3532                                    module + 1, split, lane / width,
3533                                    mlxsw_sp->base_mac,
3534                                    sizeof(mlxsw_sp->base_mac));
3535         if (err) {
3536                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
3537                         local_port);
3538                 return err;
3539         }
3540
3541         dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
3542         if (!dev) {
3543                 err = -ENOMEM;
3544                 goto err_alloc_etherdev;
3545         }
3546         SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev);
3547         mlxsw_sp_port = netdev_priv(dev);
3548         mlxsw_sp_port->dev = dev;
3549         mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
3550         mlxsw_sp_port->local_port = local_port;
3551         mlxsw_sp_port->pvid = MLXSW_SP_DEFAULT_VID;
3552         mlxsw_sp_port->split = split;
3553         mlxsw_sp_port->mapping.module = module;
3554         mlxsw_sp_port->mapping.width = width;
3555         mlxsw_sp_port->mapping.lane = lane;
3556         mlxsw_sp_port->link.autoneg = 1;
3557         INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list);
3558         INIT_LIST_HEAD(&mlxsw_sp_port->mall_tc_list);
3559
3560         mlxsw_sp_port->pcpu_stats =
3561                 netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
3562         if (!mlxsw_sp_port->pcpu_stats) {
3563                 err = -ENOMEM;
3564                 goto err_alloc_stats;
3565         }
3566
3567         mlxsw_sp_port->sample = kzalloc(sizeof(*mlxsw_sp_port->sample),
3568                                         GFP_KERNEL);
3569         if (!mlxsw_sp_port->sample) {
3570                 err = -ENOMEM;
3571                 goto err_alloc_sample;
3572         }
3573
3574         INIT_DELAYED_WORK(&mlxsw_sp_port->periodic_hw_stats.update_dw,
3575                           &update_stats_cache);
3576
3577         dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
3578         dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;
3579
3580         err = mlxsw_sp_port_module_map(mlxsw_sp_port, module, width, lane);
3581         if (err) {
3582                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to map module\n",
3583                         mlxsw_sp_port->local_port);
3584                 goto err_port_module_map;
3585         }
3586
3587         err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
3588         if (err) {
3589                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
3590                         mlxsw_sp_port->local_port);
3591                 goto err_port_swid_set;
3592         }
3593
3594         err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
3595         if (err) {
3596                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
3597                         mlxsw_sp_port->local_port);
3598                 goto err_dev_addr_init;
3599         }
3600
3601         netif_carrier_off(dev);
3602
3603         dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
3604                          NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
3605         dev->hw_features |= NETIF_F_HW_TC | NETIF_F_LOOPBACK;
3606
3607         dev->min_mtu = 0;
3608         dev->max_mtu = ETH_MAX_MTU;
3609
3610         /* Each packet needs to have a Tx header (metadata) on top of all
3611          * other headers.
3612          */
3613         dev->needed_headroom = MLXSW_TXHDR_LEN;
3614
3615         err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
3616         if (err) {
3617                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
3618                         mlxsw_sp_port->local_port);
3619                 goto err_port_system_port_mapping_set;
3620         }
3621
3622         err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width);
3623         if (err) {
3624                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
3625                         mlxsw_sp_port->local_port);
3626                 goto err_port_speed_by_width_set;
3627         }
3628
3629         err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
3630         if (err) {
3631                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
3632                         mlxsw_sp_port->local_port);
3633                 goto err_port_mtu_set;
3634         }
3635
3636         err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
3637         if (err)
3638                 goto err_port_admin_status_set;
3639
3640         err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
3641         if (err) {
3642                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
3643                         mlxsw_sp_port->local_port);
3644                 goto err_port_buffers_init;
3645         }
3646
3647         err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
3648         if (err) {
3649                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
3650                         mlxsw_sp_port->local_port);
3651                 goto err_port_ets_init;
3652         }
3653
3654         err = mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, true);
3655         if (err) {
3656                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC MC mode\n",
3657                         mlxsw_sp_port->local_port);
3658                 goto err_port_tc_mc_mode;
3659         }
3660
3661         /* ETS and buffers must be initialized before DCB. */
3662         err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
3663         if (err) {
3664                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
3665                         mlxsw_sp_port->local_port);
3666                 goto err_port_dcb_init;
3667         }
3668
3669         err = mlxsw_sp_port_fids_init(mlxsw_sp_port);
3670         if (err) {
3671                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize FIDs\n",
3672                         mlxsw_sp_port->local_port);
3673                 goto err_port_fids_init;
3674         }
3675
3676         err = mlxsw_sp_tc_qdisc_init(mlxsw_sp_port);
3677         if (err) {
3678                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC qdiscs\n",
3679                         mlxsw_sp_port->local_port);
3680                 goto err_port_qdiscs_init;
3681         }
3682
3683         err = mlxsw_sp_port_nve_init(mlxsw_sp_port);
3684         if (err) {
3685                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize NVE\n",
3686                         mlxsw_sp_port->local_port);
3687                 goto err_port_nve_init;
3688         }
3689
3690         err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID);
3691         if (err) {
3692                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set PVID\n",
3693                         mlxsw_sp_port->local_port);
3694                 goto err_port_pvid_set;
3695         }
3696
3697         mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_create(mlxsw_sp_port,
3698                                                        MLXSW_SP_DEFAULT_VID);
3699         if (IS_ERR(mlxsw_sp_port_vlan)) {
3700                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VLAN for default VID\n",
3701                         mlxsw_sp_port->local_port);
3702                 err = PTR_ERR(mlxsw_sp_port_vlan);
3703                 goto err_port_vlan_create;
3704         }
3705         mlxsw_sp_port->default_vlan = mlxsw_sp_port_vlan;
3706
3707         mlxsw_sp->ports[local_port] = mlxsw_sp_port;
3708         err = register_netdev(dev);
3709         if (err) {
3710                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
3711                         mlxsw_sp_port->local_port);
3712                 goto err_register_netdev;
3713         }
3714
3715         mlxsw_core_port_eth_set(mlxsw_sp->core, mlxsw_sp_port->local_port,
3716                                 mlxsw_sp_port, dev);
3717         mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw, 0);
3718         return 0;
3719
3720 err_register_netdev:
3721         mlxsw_sp->ports[local_port] = NULL;
3722         mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
3723 err_port_vlan_create:
3724 err_port_pvid_set:
3725         mlxsw_sp_port_nve_fini(mlxsw_sp_port);
3726 err_port_nve_init:
3727         mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
3728 err_port_qdiscs_init:
3729         mlxsw_sp_port_fids_fini(mlxsw_sp_port);
3730 err_port_fids_init:
3731         mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
3732 err_port_dcb_init:
3733         mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
3734 err_port_tc_mc_mode:
3735 err_port_ets_init:
3736 err_port_buffers_init:
3737 err_port_admin_status_set:
3738 err_port_mtu_set:
3739 err_port_speed_by_width_set:
3740 err_port_system_port_mapping_set:
3741 err_dev_addr_init:
3742         mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
3743 err_port_swid_set:
3744         mlxsw_sp_port_module_unmap(mlxsw_sp_port);
3745 err_port_module_map:
3746         kfree(mlxsw_sp_port->sample);
3747 err_alloc_sample:
3748         free_percpu(mlxsw_sp_port->pcpu_stats);
3749 err_alloc_stats:
3750         free_netdev(dev);
3751 err_alloc_etherdev:
3752         mlxsw_core_port_fini(mlxsw_sp->core, local_port);
3753         return err;
3754 }
3755
3756 static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
3757 {
3758         struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
3759
3760         cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw);
3761         mlxsw_sp_port_ptp_clear(mlxsw_sp_port);
3762         mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp);
3763         unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
3764         mlxsw_sp->ports[local_port] = NULL;
3765         mlxsw_sp_port_vlan_flush(mlxsw_sp_port, true);
3766         mlxsw_sp_port_nve_fini(mlxsw_sp_port);
3767         mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
3768         mlxsw_sp_port_fids_fini(mlxsw_sp_port);
3769         mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
3770         mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
3771         mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
3772         mlxsw_sp_port_module_unmap(mlxsw_sp_port);
3773         kfree(mlxsw_sp_port->sample);
3774         free_percpu(mlxsw_sp_port->pcpu_stats);
3775         WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list));
3776         free_netdev(mlxsw_sp_port->dev);
3777         mlxsw_core_port_fini(mlxsw_sp->core, local_port);
3778 }
3779
3780 static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u8 local_port)
3781 {
3782         return mlxsw_sp->ports[local_port] != NULL;
3783 }
3784
3785 static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
3786 {
3787         int i;
3788
3789         for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++)
3790                 if (mlxsw_sp_port_created(mlxsw_sp, i))
3791                         mlxsw_sp_port_remove(mlxsw_sp, i);
3792         kfree(mlxsw_sp->port_to_module);
3793         kfree(mlxsw_sp->ports);
3794 }
3795
3796 static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
3797 {
3798         unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
3799         u8 module, width, lane;
3800         size_t alloc_size;
3801         int i;
3802         int err;
3803
3804         alloc_size = sizeof(struct mlxsw_sp_port *) * max_ports;
3805         mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
3806         if (!mlxsw_sp->ports)
3807                 return -ENOMEM;
3808
3809         mlxsw_sp->port_to_module = kmalloc_array(max_ports, sizeof(int),
3810                                                  GFP_KERNEL);
3811         if (!mlxsw_sp->port_to_module) {
3812                 err = -ENOMEM;
3813                 goto err_port_to_module_alloc;
3814         }
3815
3816         for (i = 1; i < max_ports; i++) {
3817                 /* Mark as invalid */
3818                 mlxsw_sp->port_to_module[i] = -1;
3819
3820                 err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module,
3821                                                     &width, &lane);
3822                 if (err)
3823                         goto err_port_module_info_get;
3824                 if (!width)
3825                         continue;
3826                 mlxsw_sp->port_to_module[i] = module;
3827                 err = mlxsw_sp_port_create(mlxsw_sp, i, false,
3828                                            module, width, lane);
3829                 if (err)
3830                         goto err_port_create;
3831         }
3832         return 0;
3833
3834 err_port_create:
3835 err_port_module_info_get:
3836         for (i--; i >= 1; i--)
3837                 if (mlxsw_sp_port_created(mlxsw_sp, i))
3838                         mlxsw_sp_port_remove(mlxsw_sp, i);
3839         kfree(mlxsw_sp->port_to_module);
3840 err_port_to_module_alloc:
3841         kfree(mlxsw_sp->ports);
3842         return err;
3843 }
3844
3845 static u8 mlxsw_sp_cluster_base_port_get(u8 local_port)
3846 {
3847         u8 offset = (local_port - 1) % MLXSW_SP_PORTS_PER_CLUSTER_MAX;
3848
3849         return local_port - offset;
3850 }
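
/* Example (assuming MLXSW_SP_PORTS_PER_CLUSTER_MAX is 4, as defined in
 * spectrum.h): local ports 5-8 share the cluster whose base port is 5;
 * for local_port 7 the offset is (7 - 1) % 4 = 2, so 7 - 2 = 5 is returned.
 */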
3851
3852 static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
3853                                       u8 module, unsigned int count, u8 offset)
3854 {
3855         u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count;
3856         int err, i;
3857
3858         for (i = 0; i < count; i++) {
3859                 err = mlxsw_sp_port_create(mlxsw_sp, base_port + i * offset,
3860                                            true, module, width, i * width);
3861                 if (err)
3862                         goto err_port_create;
3863         }
3864
3865         return 0;
3866
3867 err_port_create:
3868         for (i--; i >= 0; i--)
3869                 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset))
3870                         mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset);
3871         return err;
3872 }
3873
3874 static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
3875                                          u8 base_port, unsigned int count)
3876 {
3877         u8 local_port, module, width = MLXSW_PORT_MODULE_MAX_WIDTH;
3878         int i;
3879
3880         /* Splitting by four means we need to re-create two ports;
3881          * splitting by two means re-creating only one.
3882          */
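        /* For example, unsplitting a port that was split into four
         * (count == 4) re-creates two full-width ports, at base_port and
         * base_port + 2, while unsplitting a split-by-two port re-creates
         * only the one at base_port.
         */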
3883         count = count / 2;
3884
3885         for (i = 0; i < count; i++) {
3886                 local_port = base_port + i * 2;
3887                 if (mlxsw_sp->port_to_module[local_port] < 0)
3888                         continue;
3889                 module = mlxsw_sp->port_to_module[local_port];
3890
3891                 mlxsw_sp_port_create(mlxsw_sp, local_port, false, module,
3892                                      width, 0);
3893         }
3894 }
3895
3896 static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
3897                                unsigned int count,
3898                                struct netlink_ext_ack *extack)
3899 {
3900         struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
3901         u8 local_ports_in_1x, local_ports_in_2x, offset;
3902         struct mlxsw_sp_port *mlxsw_sp_port;
3903         u8 module, cur_width, base_port;
3904         int i;
3905         int err;
3906
3907         if (!MLXSW_CORE_RES_VALID(mlxsw_core, LOCAL_PORTS_IN_1X) ||
3908             !MLXSW_CORE_RES_VALID(mlxsw_core, LOCAL_PORTS_IN_2X))
3909                 return -EIO;
3910
3911         local_ports_in_1x = MLXSW_CORE_RES_GET(mlxsw_core, LOCAL_PORTS_IN_1X);
3912         local_ports_in_2x = MLXSW_CORE_RES_GET(mlxsw_core, LOCAL_PORTS_IN_2X);
3913
3914         mlxsw_sp_port = mlxsw_sp->ports[local_port];
3915         if (!mlxsw_sp_port) {
3916                 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
3917                         local_port);
3918                 NL_SET_ERR_MSG_MOD(extack, "Port number does not exist");
3919                 return -EINVAL;
3920         }
3921
3922         module = mlxsw_sp_port->mapping.module;
3923         cur_width = mlxsw_sp_port->mapping.width;
3924
3925         if (count != 2 && count != 4) {
3926                 netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n");
3927                 NL_SET_ERR_MSG_MOD(extack, "Port can only be split into 2 or 4 ports");
3928                 return -EINVAL;
3929         }
3930
3931         if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) {
3932                 netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n");
3933                 NL_SET_ERR_MSG_MOD(extack, "Port cannot be split further");
3934                 return -EINVAL;
3935         }
3936
3937         /* Make sure we have enough slave (even) ports for the split. */
3938         if (count == 2) {
3939                 offset = local_ports_in_2x;
3940                 base_port = local_port;
3941                 if (mlxsw_sp->ports[base_port + local_ports_in_2x]) {
3942                         netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
3943                         NL_SET_ERR_MSG_MOD(extack, "Invalid split configuration");
3944                         return -EINVAL;
3945                 }
3946         } else {
3947                 offset = local_ports_in_1x;
3948                 base_port = mlxsw_sp_cluster_base_port_get(local_port);
3949                 if (mlxsw_sp->ports[base_port + 1] ||
3950                     mlxsw_sp->ports[base_port + 3]) {
3951                         netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
3952                         NL_SET_ERR_MSG_MOD(extack, "Invalid split configuration");
3953                         return -EINVAL;
3954                 }
3955         }
3956
3957         for (i = 0; i < count; i++)
3958                 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset))
3959                         mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset);
3960
3961         err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, module, count,
3962                                          offset);
3963         if (err) {
3964                 dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
3965                 goto err_port_split_create;
3966         }
3967
3968         return 0;
3969
3970 err_port_split_create:
3971         mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
3972         return err;
3973 }
3974
3975 static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port,
3976                                  struct netlink_ext_ack *extack)
3977 {
3978         struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
3979         u8 local_ports_in_1x, local_ports_in_2x, offset;
3980         struct mlxsw_sp_port *mlxsw_sp_port;
3981         u8 cur_width, base_port;
3982         unsigned int count;
3983         int i;
3984
3985         if (!MLXSW_CORE_RES_VALID(mlxsw_core, LOCAL_PORTS_IN_1X) ||
3986             !MLXSW_CORE_RES_VALID(mlxsw_core, LOCAL_PORTS_IN_2X))
3987                 return -EIO;
3988
3989         local_ports_in_1x = MLXSW_CORE_RES_GET(mlxsw_core, LOCAL_PORTS_IN_1X);
3990         local_ports_in_2x = MLXSW_CORE_RES_GET(mlxsw_core, LOCAL_PORTS_IN_2X);
3991
3992         mlxsw_sp_port = mlxsw_sp->ports[local_port];
3993         if (!mlxsw_sp_port) {
3994                 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
3995                         local_port);
3996                 NL_SET_ERR_MSG_MOD(extack, "Port number does not exist");
3997                 return -EINVAL;
3998         }
3999
4000         if (!mlxsw_sp_port->split) {
4001                 netdev_err(mlxsw_sp_port->dev, "Port was not split\n");
4002                 NL_SET_ERR_MSG_MOD(extack, "Port was not split");
4003                 return -EINVAL;
4004         }
4005
4006         cur_width = mlxsw_sp_port->mapping.width;
4007         count = cur_width == 1 ? 4 : 2;
4008
4009         if (count == 2)
4010                 offset = local_ports_in_2x;
4011         else
4012                 offset = local_ports_in_1x;
4013
4014         base_port = mlxsw_sp_cluster_base_port_get(local_port);
4015
4016         /* Determine which ports to remove. */
4017         if (count == 2 && local_port >= base_port + 2)
4018                 base_port = base_port + 2;
4019
4020         for (i = 0; i < count; i++)
4021                 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset))
4022                         mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset);
4023
4024         mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
4025
4026         return 0;
4027 }
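
/* The split and unsplit callbacks above are invoked through devlink, e.g.
 * (the device address is only an example):
 *
 *   devlink port split pci/0000:03:00.0/1 count 4
 *   devlink port unsplit pci/0000:03:00.0/1
 */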
4028
4029 static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
4030                                      char *pude_pl, void *priv)
4031 {
4032         struct mlxsw_sp *mlxsw_sp = priv;
4033         struct mlxsw_sp_port *mlxsw_sp_port;
4034         enum mlxsw_reg_pude_oper_status status;
4035         u8 local_port;
4036
4037         local_port = mlxsw_reg_pude_local_port_get(pude_pl);
4038         mlxsw_sp_port = mlxsw_sp->ports[local_port];
4039         if (!mlxsw_sp_port)
4040                 return;
4041
4042         status = mlxsw_reg_pude_oper_status_get(pude_pl);
4043         if (status == MLXSW_PORT_OPER_STATUS_UP) {
4044                 netdev_info(mlxsw_sp_port->dev, "link up\n");
4045                 netif_carrier_on(mlxsw_sp_port->dev);
4046         } else {
4047                 netdev_info(mlxsw_sp_port->dev, "link down\n");
4048                 netif_carrier_off(mlxsw_sp_port->dev);
4049         }
4050 }
4051
4052 static void mlxsw_sp1_ptp_fifo_event_func(struct mlxsw_sp *mlxsw_sp,
4053                                           char *mtpptr_pl, bool ingress)
4054 {
4055         u8 local_port;
4056         u8 num_rec;
4057         int i;
4058
4059         local_port = mlxsw_reg_mtpptr_local_port_get(mtpptr_pl);
4060         num_rec = mlxsw_reg_mtpptr_num_rec_get(mtpptr_pl);
4061         for (i = 0; i < num_rec; i++) {
4062                 u8 domain_number;
4063                 u8 message_type;
4064                 u16 sequence_id;
4065                 u64 timestamp;
4066
4067                 mlxsw_reg_mtpptr_unpack(mtpptr_pl, i, &message_type,
4068                                         &domain_number, &sequence_id,
4069                                         &timestamp);
4070                 mlxsw_sp1_ptp_got_timestamp(mlxsw_sp, ingress, local_port,
4071                                             message_type, domain_number,
4072                                             sequence_id, timestamp);
4073         }
4074 }
4075
4076 static void mlxsw_sp1_ptp_ing_fifo_event_func(const struct mlxsw_reg_info *reg,
4077                                               char *mtpptr_pl, void *priv)
4078 {
4079         struct mlxsw_sp *mlxsw_sp = priv;
4080
4081         mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, true);
4082 }
4083
4084 static void mlxsw_sp1_ptp_egr_fifo_event_func(const struct mlxsw_reg_info *reg,
4085                                               char *mtpptr_pl, void *priv)
4086 {
4087         struct mlxsw_sp *mlxsw_sp = priv;
4088
4089         mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, false);
4090 }
4091
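/* Basic Rx listener: attach the skb to the receiving port's netdev, update
 * the per-CPU Rx counters and inject the packet into the networking stack.
 * The _mark/_l3_mark variants below set the offload forwarding marks before
 * reusing this function.
 */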
4092 void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb,
4093                                        u8 local_port, void *priv)
4094 {
4095         struct mlxsw_sp *mlxsw_sp = priv;
4096         struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
4097         struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
4098
4099         if (unlikely(!mlxsw_sp_port)) {
4100                 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
4101                                      local_port);
4102                 return;
4103         }
4104
4105         skb->dev = mlxsw_sp_port->dev;
4106
4107         pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
4108         u64_stats_update_begin(&pcpu_stats->syncp);
4109         pcpu_stats->rx_packets++;
4110         pcpu_stats->rx_bytes += skb->len;
4111         u64_stats_update_end(&pcpu_stats->syncp);
4112
4113         skb->protocol = eth_type_trans(skb, skb->dev);
4114         netif_receive_skb(skb);
4115 }
4116
4117 static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port,
4118                                            void *priv)
4119 {
4120         skb->offload_fwd_mark = 1;
4121         return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
4122 }
4123
4124 static void mlxsw_sp_rx_listener_l3_mark_func(struct sk_buff *skb,
4125                                               u8 local_port, void *priv)
4126 {
4127         skb->offload_l3_fwd_mark = 1;
4128         skb->offload_fwd_mark = 1;
4129         return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
4130 }
4131
4132 static void mlxsw_sp_rx_listener_sample_func(struct sk_buff *skb, u8 local_port,
4133                                              void *priv)
4134 {
4135         struct mlxsw_sp *mlxsw_sp = priv;
4136         struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
4137         struct psample_group *psample_group;
4138         u32 size;
4139
4140         if (unlikely(!mlxsw_sp_port)) {
4141                 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received for non-existent port\n",
4142                                      local_port);
4143                 goto out;
4144         }
4145         if (unlikely(!mlxsw_sp_port->sample)) {
4146                 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received on unsupported port\n",
4147                                      local_port);
4148                 goto out;
4149         }
4150
4151         size = mlxsw_sp_port->sample->truncate ?
4152                   mlxsw_sp_port->sample->trunc_size : skb->len;
4153
4154         rcu_read_lock();
4155         psample_group = rcu_dereference(mlxsw_sp_port->sample->psample_group);
4156         if (!psample_group)
4157                 goto out_unlock;
4158         psample_sample_packet(psample_group, skb, size,
4159                               mlxsw_sp_port->dev->ifindex, 0,
4160                               mlxsw_sp_port->sample->rate);
4161 out_unlock:
4162         rcu_read_unlock();
4163 out:
4164         consume_skb(skb);
4165 }
4166
4167 static void mlxsw_sp_rx_listener_ptp(struct sk_buff *skb, u8 local_port,
4168                                      void *priv)
4169 {
4170         struct mlxsw_sp *mlxsw_sp = priv;
4171
4172         mlxsw_sp->ptp_ops->receive(mlxsw_sp, skb, local_port);
4173 }
4174
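/* Convenience wrappers around MLXSW_RXL()/MLXSW_EVENTL() that select the Rx
 * listener function and prepend the SP_ prefix to the trap group name, so
 * that the listener table below stays compact.
 */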
4175 #define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl)  \
4176         MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action, \
4177                   _is_ctrl, SP_##_trap_group, DISCARD)
4178
4179 #define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl)     \
4180         MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action,    \
4181                 _is_ctrl, SP_##_trap_group, DISCARD)
4182
4183 #define MLXSW_SP_RXL_L3_MARK(_trap_id, _action, _trap_group, _is_ctrl)  \
4184         MLXSW_RXL(mlxsw_sp_rx_listener_l3_mark_func, _trap_id, _action, \
4185                 _is_ctrl, SP_##_trap_group, DISCARD)
4186
4187 #define MLXSW_SP_EVENTL(_func, _trap_id)                \
4188         MLXSW_EVENTL(_func, _trap_id, SP_EVENT)
4189
4190 static const struct mlxsw_listener mlxsw_sp_listener[] = {
4191         /* Events */
4192         MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func, PUDE),
4193         /* L2 traps */
4194         MLXSW_SP_RXL_NO_MARK(STP, TRAP_TO_CPU, STP, true),
4195         MLXSW_SP_RXL_NO_MARK(LACP, TRAP_TO_CPU, LACP, true),
4196         MLXSW_RXL(mlxsw_sp_rx_listener_ptp, LLDP, TRAP_TO_CPU,
4197                   false, SP_LLDP, DISCARD),
4198         MLXSW_SP_RXL_MARK(DHCP, MIRROR_TO_CPU, DHCP, false),
4199         MLXSW_SP_RXL_MARK(IGMP_QUERY, MIRROR_TO_CPU, IGMP, false),
4200         MLXSW_SP_RXL_NO_MARK(IGMP_V1_REPORT, TRAP_TO_CPU, IGMP, false),
4201         MLXSW_SP_RXL_NO_MARK(IGMP_V2_REPORT, TRAP_TO_CPU, IGMP, false),
4202         MLXSW_SP_RXL_NO_MARK(IGMP_V2_LEAVE, TRAP_TO_CPU, IGMP, false),
4203         MLXSW_SP_RXL_NO_MARK(IGMP_V3_REPORT, TRAP_TO_CPU, IGMP, false),
4204         MLXSW_SP_RXL_MARK(ARPBC, MIRROR_TO_CPU, ARP, false),
4205         MLXSW_SP_RXL_MARK(ARPUC, MIRROR_TO_CPU, ARP, false),
4206         MLXSW_SP_RXL_NO_MARK(FID_MISS, TRAP_TO_CPU, IP2ME, false),
4207         MLXSW_SP_RXL_MARK(IPV6_MLDV12_LISTENER_QUERY, MIRROR_TO_CPU, IPV6_MLD,
4208                           false),
4209         MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_REPORT, TRAP_TO_CPU, IPV6_MLD,
4210                              false),
4211         MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_DONE, TRAP_TO_CPU, IPV6_MLD,
4212                              false),
4213         MLXSW_SP_RXL_NO_MARK(IPV6_MLDV2_LISTENER_REPORT, TRAP_TO_CPU, IPV6_MLD,
4214                              false),
4215         /* L3 traps */
4216         MLXSW_SP_RXL_MARK(MTUERROR, TRAP_TO_CPU, ROUTER_EXP, false),
4217         MLXSW_SP_RXL_MARK(TTLERROR, TRAP_TO_CPU, ROUTER_EXP, false),
4218         MLXSW_SP_RXL_L3_MARK(LBERROR, MIRROR_TO_CPU, LBERROR, false),
4219         MLXSW_SP_RXL_MARK(IP2ME, TRAP_TO_CPU, IP2ME, false),
4220         MLXSW_SP_RXL_MARK(IPV6_UNSPECIFIED_ADDRESS, TRAP_TO_CPU, ROUTER_EXP,
4221                           false),
4222         MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP, false),
4223         MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_SRC, TRAP_TO_CPU, ROUTER_EXP, false),
4224         MLXSW_SP_RXL_MARK(IPV6_ALL_NODES_LINK, TRAP_TO_CPU, ROUTER_EXP, false),
4225         MLXSW_SP_RXL_MARK(IPV6_ALL_ROUTERS_LINK, TRAP_TO_CPU, ROUTER_EXP,
4226                           false),
4227         MLXSW_SP_RXL_MARK(IPV4_OSPF, TRAP_TO_CPU, OSPF, false),
4228         MLXSW_SP_RXL_MARK(IPV6_OSPF, TRAP_TO_CPU, OSPF, false),
4229         MLXSW_SP_RXL_MARK(IPV6_DHCP, TRAP_TO_CPU, DHCP, false),
4230         MLXSW_SP_RXL_MARK(RTR_INGRESS0, TRAP_TO_CPU, REMOTE_ROUTE, false),
4231         MLXSW_SP_RXL_MARK(IPV4_BGP, TRAP_TO_CPU, BGP, false),
4232         MLXSW_SP_RXL_MARK(IPV6_BGP, TRAP_TO_CPU, BGP, false),
4233         MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_SOLICITATION, TRAP_TO_CPU, IPV6_ND,
4234                           false),
4235         MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_ADVERTISMENT, TRAP_TO_CPU, IPV6_ND,
4236                           false),
4237         MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_SOLICITATION, TRAP_TO_CPU, IPV6_ND,
4238                           false),
4239         MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_ADVERTISMENT, TRAP_TO_CPU, IPV6_ND,
4240                           false),
4241         MLXSW_SP_RXL_MARK(L3_IPV6_REDIRECTION, TRAP_TO_CPU, IPV6_ND, false),
4242         MLXSW_SP_RXL_MARK(IPV6_MC_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP,
4243                           false),
4244         MLXSW_SP_RXL_MARK(HOST_MISS_IPV4, TRAP_TO_CPU, HOST_MISS, false),
4245         MLXSW_SP_RXL_MARK(HOST_MISS_IPV6, TRAP_TO_CPU, HOST_MISS, false),
4246         MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV4, TRAP_TO_CPU, ROUTER_EXP, false),
4247         MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV6, TRAP_TO_CPU, ROUTER_EXP, false),
4248         MLXSW_SP_RXL_MARK(IPIP_DECAP_ERROR, TRAP_TO_CPU, ROUTER_EXP, false),
4249         MLXSW_SP_RXL_MARK(DECAP_ECN0, TRAP_TO_CPU, ROUTER_EXP, false),
4250         MLXSW_SP_RXL_MARK(IPV4_VRRP, TRAP_TO_CPU, ROUTER_EXP, false),
4251         MLXSW_SP_RXL_MARK(IPV6_VRRP, TRAP_TO_CPU, ROUTER_EXP, false),
4252         /* PKT Sample trap */
4253         MLXSW_RXL(mlxsw_sp_rx_listener_sample_func, PKT_SAMPLE, MIRROR_TO_CPU,
4254                   false, SP_IP2ME, DISCARD),
4255         /* ACL trap */
4256         MLXSW_SP_RXL_NO_MARK(ACL0, TRAP_TO_CPU, IP2ME, false),
4257         /* Multicast Router Traps */
4258         MLXSW_SP_RXL_MARK(IPV4_PIM, TRAP_TO_CPU, PIM, false),
4259         MLXSW_SP_RXL_MARK(IPV6_PIM, TRAP_TO_CPU, PIM, false),
4260         MLXSW_SP_RXL_MARK(RPF, TRAP_TO_CPU, RPF, false),
4261         MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false),
4262         MLXSW_SP_RXL_L3_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false),
4263         /* NVE traps */
4264         MLXSW_SP_RXL_MARK(NVE_ENCAP_ARP, TRAP_TO_CPU, ARP, false),
4265         MLXSW_SP_RXL_NO_MARK(NVE_DECAP_ARP, TRAP_TO_CPU, ARP, false),
4266         /* PTP traps */
4267         MLXSW_RXL(mlxsw_sp_rx_listener_ptp, PTP0, TRAP_TO_CPU,
4268                   false, SP_PTP0, DISCARD),
4269         MLXSW_SP_RXL_NO_MARK(PTP1, TRAP_TO_CPU, PTP1, false),
4270 };
4271
4272 static const struct mlxsw_listener mlxsw_sp1_listener[] = {
4273         /* Events */
4274         MLXSW_EVENTL(mlxsw_sp1_ptp_egr_fifo_event_func, PTP_EGR_FIFO, SP_PTP0),
4275         MLXSW_EVENTL(mlxsw_sp1_ptp_ing_fifo_event_func, PTP_ING_FIFO, SP_PTP0),
4276 };
4277
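/* Configure a policer for each CPU trap group, so that traffic trapped or
 * mirrored to the CPU is rate limited. Each group listed below gets its own
 * rate and burst size; groups not listed keep their default policer
 * configuration.
 */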
4278 static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
4279 {
4280         char qpcr_pl[MLXSW_REG_QPCR_LEN];
4281         enum mlxsw_reg_qpcr_ir_units ir_units;
4282         int max_cpu_policers;
4283         bool is_bytes;
4284         u8 burst_size;
4285         u32 rate;
4286         int i, err;
4287
4288         if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_CPU_POLICERS))
4289                 return -EIO;
4290
4291         max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);
4292
4293         ir_units = MLXSW_REG_QPCR_IR_UNITS_M;
4294         for (i = 0; i < max_cpu_policers; i++) {
4295                 is_bytes = false;
4296                 switch (i) {
4297                 case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP:
4298                 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP:
4299                 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP:
4300                 case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
4301                 case MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM:
4302                 case MLXSW_REG_HTGT_TRAP_GROUP_SP_RPF:
4303                 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LBERROR:
4304                         rate = 128;
4305                         burst_size = 7;
4306                         break;
4307                 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP:
4308                 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD:
4309                         rate = 16 * 1024;
4310                         burst_size = 10;
4311                         break;
4312                 case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP:
4313                 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP:
4314                 case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP:
4315                 case MLXSW_REG_HTGT_TRAP_GROUP_SP_HOST_MISS:
4316                 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
4317                 case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE:
4318                 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND:
4319                 case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
4320                         rate = 1024;
4321                         burst_size = 7;
4322                         break;
4323                 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME:
4324                         rate = 1024;
4325                         burst_size = 7;
4326                         break;
4327                 case MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP0:
4328                         rate = 24 * 1024;
4329                         burst_size = 12;
4330                         break;
4331                 case MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP1:
4332                         rate = 19 * 1024;
4333                         burst_size = 12;
4334                         break;
4335                 default:
4336                         continue;
4337                 }
4338
4339                 mlxsw_reg_qpcr_pack(qpcr_pl, i, ir_units, is_bytes, rate,
4340                                     burst_size);
4341                 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl);
4342                 if (err)
4343                         return err;
4344         }
4345
4346         return 0;
4347 }
4348
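/* Bind each CPU trap group to a policer (by convention, policer ID equals
 * the group ID), a priority and a traffic class. Control protocols such as
 * STP and LACP get the highest configured priority, while exception traffic
 * such as host misses gets the lowest.
 */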
4349 static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core)
4350 {
4351         char htgt_pl[MLXSW_REG_HTGT_LEN];
4352         enum mlxsw_reg_htgt_trap_group i;
4353         int max_cpu_policers;
4354         int max_trap_groups;
4355         u8 priority, tc;
4356         u16 policer_id;
4357         int err;
4358
4359         if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_TRAP_GROUPS))
4360                 return -EIO;
4361
4362         max_trap_groups = MLXSW_CORE_RES_GET(mlxsw_core, MAX_TRAP_GROUPS);
4363         max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);
4364
4365         for (i = 0; i < max_trap_groups; i++) {
4366                 policer_id = i;
4367                 switch (i) {
4368                 case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP:
4369                 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP:
4370                 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP:
4371                 case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
4372                 case MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM:
4373                 case MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP0:
4374                         priority = 5;
4375                         tc = 5;
4376                         break;
4377                 case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP:
4378                 case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP:
4379                         priority = 4;
4380                         tc = 4;
4381                         break;
4382                 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP:
4383                 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME:
4384                 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD:
4385                         priority = 3;
4386                         tc = 3;
4387                         break;
4388                 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP:
4389                 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND:
4390                 case MLXSW_REG_HTGT_TRAP_GROUP_SP_RPF:
4391                 case MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP1:
4392                         priority = 2;
4393                         tc = 2;
4394                         break;
4395                 case MLXSW_REG_HTGT_TRAP_GROUP_SP_HOST_MISS:
4396                 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
4397                 case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE:
4398                 case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
4399                 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LBERROR:
4400                         priority = 1;
4401                         tc = 1;
4402                         break;
4403                 case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT:
4404                         priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY;
4405                         tc = MLXSW_REG_HTGT_DEFAULT_TC;
4406                         policer_id = MLXSW_REG_HTGT_INVALID_POLICER;
4407                         break;
4408                 default:
4409                         continue;
4410                 }
4411
4412                 if (max_cpu_policers <= policer_id &&
4413                     policer_id != MLXSW_REG_HTGT_INVALID_POLICER)
4414                         return -EIO;
4415
4416                 mlxsw_reg_htgt_pack(htgt_pl, i, policer_id, priority, tc);
4417                 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
4418                 if (err)
4419                         return err;
4420         }
4421
4422         return 0;
4423 }
4424
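/* Register an array of trap listeners with the core. On failure, unregister
 * the listeners that were already registered, so that the operation has no
 * partial effect.
 */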
4425 static int mlxsw_sp_traps_register(struct mlxsw_sp *mlxsw_sp,
4426                                    const struct mlxsw_listener listeners[],
4427                                    size_t listeners_count)
4428 {
4429         int i;
4430         int err;
4431
4432         for (i = 0; i < listeners_count; i++) {
4433                 err = mlxsw_core_trap_register(mlxsw_sp->core,
4434                                                &listeners[i],
4435                                                mlxsw_sp);
4436                 if (err)
4437                         goto err_listener_register;
4439         }
4440         return 0;
4441
4442 err_listener_register:
4443         for (i--; i >= 0; i--) {
4444                 mlxsw_core_trap_unregister(mlxsw_sp->core,
4445                                            &listeners[i],
4446                                            mlxsw_sp);
4447         }
4448         return err;
4449 }
4450
4451 static void mlxsw_sp_traps_unregister(struct mlxsw_sp *mlxsw_sp,
4452                                       const struct mlxsw_listener listeners[],
4453                                       size_t listeners_count)
4454 {
4455         int i;
4456
4457         for (i = 0; i < listeners_count; i++) {
4458                 mlxsw_core_trap_unregister(mlxsw_sp->core,
4459                                            &listeners[i],
4460                                            mlxsw_sp);
4461         }
4462 }
4463
4464 static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
4465 {
4466         int err;
4467
4468         err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core);
4469         if (err)
4470                 return err;
4471
4472         err = mlxsw_sp_trap_groups_set(mlxsw_sp->core);
4473         if (err)
4474                 return err;
4475
4476         err = mlxsw_sp_traps_register(mlxsw_sp, mlxsw_sp_listener,
4477                                       ARRAY_SIZE(mlxsw_sp_listener));
4478         if (err)
4479                 return err;
4480
4481         err = mlxsw_sp_traps_register(mlxsw_sp, mlxsw_sp->listeners,
4482                                       mlxsw_sp->listeners_count);
4483         if (err)
4484                 goto err_extra_traps_init;
4485
4486         return 0;
4487
4488 err_extra_traps_init:
4489         mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp_listener,
4490                                   ARRAY_SIZE(mlxsw_sp_listener));
4491         return err;
4492 }
4493
4494 static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
4495 {
4496         mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp->listeners,
4497                                   mlxsw_sp->listeners_count);
4498         mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp_listener,
4499                                   ARRAY_SIZE(mlxsw_sp_listener));
4500 }
4501
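/* Configure LAG hashing and allocate the LAG tracking array. The hash seed
 * is derived from the switch base MAC, presumably so that different
 * switches do not hash flows identically, and the hash takes L2, VLAN, L3
 * and L4 fields into account.
 */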
4502 #define MLXSW_SP_LAG_SEED_INIT 0xcafecafe
4503
4504 static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
4505 {
4506         char slcr_pl[MLXSW_REG_SLCR_LEN];
4507         u32 seed;
4508         int err;
4509
4510         seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac),
4511                      MLXSW_SP_LAG_SEED_INIT);
4512         mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
4513                                      MLXSW_REG_SLCR_LAG_HASH_DMAC |
4514                                      MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
4515                                      MLXSW_REG_SLCR_LAG_HASH_VLANID |
4516                                      MLXSW_REG_SLCR_LAG_HASH_SIP |
4517                                      MLXSW_REG_SLCR_LAG_HASH_DIP |
4518                                      MLXSW_REG_SLCR_LAG_HASH_SPORT |
4519                                      MLXSW_REG_SLCR_LAG_HASH_DPORT |
4520                                      MLXSW_REG_SLCR_LAG_HASH_IPPROTO, seed);
4521         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
4522         if (err)
4523                 return err;
4524
4525         if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG) ||
4526             !MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS))
4527                 return -EIO;
4528
4529         mlxsw_sp->lags = kcalloc(MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG),
4530                                  sizeof(struct mlxsw_sp_upper),
4531                                  GFP_KERNEL);
4532         if (!mlxsw_sp->lags)
4533                 return -ENOMEM;
4534
4535         return 0;
4536 }
4537
4538 static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp)
4539 {
4540         kfree(mlxsw_sp->lags);
4541 }
4542
4543 static int mlxsw_sp_basic_trap_groups_set(struct mlxsw_core *mlxsw_core)
4544 {
4545         char htgt_pl[MLXSW_REG_HTGT_LEN];
4546
4547         mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
4548                             MLXSW_REG_HTGT_INVALID_POLICER,
4549                             MLXSW_REG_HTGT_DEFAULT_PRIORITY,
4550                             MLXSW_REG_HTGT_DEFAULT_TC);
4551         return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
4552 }
4553
4554 static const struct mlxsw_sp_ptp_ops mlxsw_sp1_ptp_ops = {
4555         .clock_init     = mlxsw_sp1_ptp_clock_init,
4556         .clock_fini     = mlxsw_sp1_ptp_clock_fini,
4557         .init           = mlxsw_sp1_ptp_init,
4558         .fini           = mlxsw_sp1_ptp_fini,
4559         .receive        = mlxsw_sp1_ptp_receive,
4560         .transmitted    = mlxsw_sp1_ptp_transmitted,
4561         .hwtstamp_get   = mlxsw_sp1_ptp_hwtstamp_get,
4562         .hwtstamp_set   = mlxsw_sp1_ptp_hwtstamp_set,
4563         .get_ts_info    = mlxsw_sp1_ptp_get_ts_info,
4564 };
4565
4566 static const struct mlxsw_sp_ptp_ops mlxsw_sp2_ptp_ops = {
4567         .clock_init     = mlxsw_sp2_ptp_clock_init,
4568         .clock_fini     = mlxsw_sp2_ptp_clock_fini,
4569         .init           = mlxsw_sp2_ptp_init,
4570         .fini           = mlxsw_sp2_ptp_fini,
4571         .receive        = mlxsw_sp2_ptp_receive,
4572         .transmitted    = mlxsw_sp2_ptp_transmitted,
4573         .hwtstamp_get   = mlxsw_sp2_ptp_hwtstamp_get,
4574         .hwtstamp_set   = mlxsw_sp2_ptp_hwtstamp_set,
4575         .get_ts_info    = mlxsw_sp2_ptp_get_ts_info,
4576 };
4577
4578 static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
4579                                     unsigned long event, void *ptr);
4580
4581 static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
4582                          const struct mlxsw_bus_info *mlxsw_bus_info)
4583 {
4584         struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
4585         int err;
4586
4587         mlxsw_sp->core = mlxsw_core;
4588         mlxsw_sp->bus_info = mlxsw_bus_info;
4589
4590         err = mlxsw_sp_fw_rev_validate(mlxsw_sp);
4591         if (err)
4592                 return err;
4593
4594         err = mlxsw_sp_base_mac_get(mlxsw_sp);
4595         if (err) {
4596                 dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
4597                 return err;
4598         }
4599
4600         err = mlxsw_sp_kvdl_init(mlxsw_sp);
4601         if (err) {
4602                 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize KVDL\n");
4603                 return err;
4604         }
4605
4606         err = mlxsw_sp_fids_init(mlxsw_sp);
4607         if (err) {
4608                 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize FIDs\n");
4609                 goto err_fids_init;
4610         }
4611
4612         err = mlxsw_sp_traps_init(mlxsw_sp);
4613         if (err) {
4614                 dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n");
4615                 goto err_traps_init;
4616         }
4617
4618         err = mlxsw_sp_buffers_init(mlxsw_sp);
4619         if (err) {
4620                 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
4621                 goto err_buffers_init;
4622         }
4623
4624         err = mlxsw_sp_lag_init(mlxsw_sp);
4625         if (err) {
4626                 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
4627                 goto err_lag_init;
4628         }
4629
4630         /* Initialize SPAN before router and switchdev, so that those components
4631          * can call mlxsw_sp_span_respin().
4632          */
4633         err = mlxsw_sp_span_init(mlxsw_sp);
4634         if (err) {
4635                 dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n");
4636                 goto err_span_init;
4637         }
4638
4639         err = mlxsw_sp_switchdev_init(mlxsw_sp);
4640         if (err) {
4641                 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
4642                 goto err_switchdev_init;
4643         }
4644
4645         err = mlxsw_sp_counter_pool_init(mlxsw_sp);
4646         if (err) {
4647                 dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n");
4648                 goto err_counter_pool_init;
4649         }
4650
4651         err = mlxsw_sp_afa_init(mlxsw_sp);
4652         if (err) {
4653                 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL actions\n");
4654                 goto err_afa_init;
4655         }
4656
4657         err = mlxsw_sp_nve_init(mlxsw_sp);
4658         if (err) {
4659                 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize NVE\n");
4660                 goto err_nve_init;
4661         }
4662
4663         err = mlxsw_sp_acl_init(mlxsw_sp);
4664         if (err) {
4665                 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n");
4666                 goto err_acl_init;
4667         }
4668
4669         err = mlxsw_sp_router_init(mlxsw_sp);
4670         if (err) {
4671                 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
4672                 goto err_router_init;
4673         }
4674
4675         if (mlxsw_sp->bus_info->read_frc_capable) {
4676                 /* NULL is a valid return value from clock_init */
4677                 mlxsw_sp->clock =
4678                         mlxsw_sp->ptp_ops->clock_init(mlxsw_sp,
4679                                                       mlxsw_sp->bus_info->dev);
4680                 if (IS_ERR(mlxsw_sp->clock)) {
4681                         err = PTR_ERR(mlxsw_sp->clock);
4682                         dev_err(mlxsw_sp->bus_info->dev, "Failed to init ptp clock\n");
4683                         goto err_ptp_clock_init;
4684                 }
4685         }
4686
4687         if (mlxsw_sp->clock) {
4688                 /* NULL is a valid return value from ptp_ops->init */
4689                 mlxsw_sp->ptp_state = mlxsw_sp->ptp_ops->init(mlxsw_sp);
4690                 if (IS_ERR(mlxsw_sp->ptp_state)) {
4691                         err = PTR_ERR(mlxsw_sp->ptp_state);
4692                         dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize PTP\n");
4693                         goto err_ptp_init;
4694                 }
4695         }
4696
4697         /* Initialize netdevice notifier after router and SPAN are initialized,
4698          * so that the event handler can use router structures and call SPAN
4699          * respin.
4700          */
4701         mlxsw_sp->netdevice_nb.notifier_call = mlxsw_sp_netdevice_event;
4702         err = register_netdevice_notifier(&mlxsw_sp->netdevice_nb);
4703         if (err) {
4704                 dev_err(mlxsw_sp->bus_info->dev, "Failed to register netdev notifier\n");
4705                 goto err_netdev_notifier;
4706         }
4707
4708         err = mlxsw_sp_dpipe_init(mlxsw_sp);
4709         if (err) {
4710                 dev_err(mlxsw_sp->bus_info->dev, "Failed to init pipeline debug\n");
4711                 goto err_dpipe_init;
4712         }
4713
4714         err = mlxsw_sp_ports_create(mlxsw_sp);
4715         if (err) {
4716                 dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
4717                 goto err_ports_create;
4718         }
4719
4720         return 0;
4721
4722 err_ports_create:
4723         mlxsw_sp_dpipe_fini(mlxsw_sp);
4724 err_dpipe_init:
4725         unregister_netdevice_notifier(&mlxsw_sp->netdevice_nb);
4726 err_netdev_notifier:
4727         if (mlxsw_sp->clock)
4728                 mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state);
4729 err_ptp_init:
4730         if (mlxsw_sp->clock)
4731                 mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock);
4732 err_ptp_clock_init:
4733         mlxsw_sp_router_fini(mlxsw_sp);
4734 err_router_init:
4735         mlxsw_sp_acl_fini(mlxsw_sp);
4736 err_acl_init:
4737         mlxsw_sp_nve_fini(mlxsw_sp);
4738 err_nve_init:
4739         mlxsw_sp_afa_fini(mlxsw_sp);
4740 err_afa_init:
4741         mlxsw_sp_counter_pool_fini(mlxsw_sp);
4742 err_counter_pool_init:
4743         mlxsw_sp_switchdev_fini(mlxsw_sp);
4744 err_switchdev_init:
4745         mlxsw_sp_span_fini(mlxsw_sp);
4746 err_span_init:
4747         mlxsw_sp_lag_fini(mlxsw_sp);
4748 err_lag_init:
4749         mlxsw_sp_buffers_fini(mlxsw_sp);
4750 err_buffers_init:
4751         mlxsw_sp_traps_fini(mlxsw_sp);
4752 err_traps_init:
4753         mlxsw_sp_fids_fini(mlxsw_sp);
4754 err_fids_init:
4755         mlxsw_sp_kvdl_fini(mlxsw_sp);
4756         return err;
4757 }
4758
4759 static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core,
4760                           const struct mlxsw_bus_info *mlxsw_bus_info)
4761 {
4762         struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
4763
4764         mlxsw_sp->req_rev = &mlxsw_sp1_fw_rev;
4765         mlxsw_sp->fw_filename = MLXSW_SP1_FW_FILENAME;
4766         mlxsw_sp->kvdl_ops = &mlxsw_sp1_kvdl_ops;
4767         mlxsw_sp->afa_ops = &mlxsw_sp1_act_afa_ops;
4768         mlxsw_sp->afk_ops = &mlxsw_sp1_afk_ops;
4769         mlxsw_sp->mr_tcam_ops = &mlxsw_sp1_mr_tcam_ops;
4770         mlxsw_sp->acl_tcam_ops = &mlxsw_sp1_acl_tcam_ops;
4771         mlxsw_sp->nve_ops_arr = mlxsw_sp1_nve_ops_arr;
4772         mlxsw_sp->mac_mask = mlxsw_sp1_mac_mask;
4773         mlxsw_sp->rif_ops_arr = mlxsw_sp1_rif_ops_arr;
4774         mlxsw_sp->sb_vals = &mlxsw_sp1_sb_vals;
4775         mlxsw_sp->port_type_speed_ops = &mlxsw_sp1_port_type_speed_ops;
4776         mlxsw_sp->ptp_ops = &mlxsw_sp1_ptp_ops;
4777         mlxsw_sp->listeners = mlxsw_sp1_listener;
4778         mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp1_listener);
4779
4780         return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info);
4781 }
4782
4783 static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core,
4784                           const struct mlxsw_bus_info *mlxsw_bus_info)
4785 {
4786         struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
4787
4788         mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
4789         mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
4790         mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
4791         mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
4792         mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
4793         mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
4794         mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
4795         mlxsw_sp->rif_ops_arr = mlxsw_sp2_rif_ops_arr;
4796         mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
4797         mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
4798         mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
4799
4800         return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info);
4801 }
4802
4803 static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
4804 {
4805         struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
4806
4807         mlxsw_sp_ports_remove(mlxsw_sp);
4808         mlxsw_sp_dpipe_fini(mlxsw_sp);
4809         unregister_netdevice_notifier(&mlxsw_sp->netdevice_nb);
4810         if (mlxsw_sp->clock) {
4811                 mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state);
4812                 mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock);
4813         }
4814         mlxsw_sp_router_fini(mlxsw_sp);
4815         mlxsw_sp_acl_fini(mlxsw_sp);
4816         mlxsw_sp_nve_fini(mlxsw_sp);
4817         mlxsw_sp_afa_fini(mlxsw_sp);
4818         mlxsw_sp_counter_pool_fini(mlxsw_sp);
4819         mlxsw_sp_switchdev_fini(mlxsw_sp);
4820         mlxsw_sp_span_fini(mlxsw_sp);
4821         mlxsw_sp_lag_fini(mlxsw_sp);
4822         mlxsw_sp_buffers_fini(mlxsw_sp);
4823         mlxsw_sp_traps_fini(mlxsw_sp);
4824         mlxsw_sp_fids_fini(mlxsw_sp);
4825         mlxsw_sp_kvdl_fini(mlxsw_sp);
4826 }
4827
4828 /* Per-FID flood tables are used for both "true" 802.1D FIDs and emulated
4829  * 802.1Q FIDs
4830  */
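/* The table therefore needs one entry per 802.1D FID plus one entry per
 * usable VLAN; VLAN_VID_MASK - 1 counts VIDs 1..4094.
 */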
4831 #define MLXSW_SP_FID_FLOOD_TABLE_SIZE   (MLXSW_SP_FID_8021D_MAX + \
4832                                          VLAN_VID_MASK - 1)
4833
4834 static const struct mlxsw_config_profile mlxsw_sp1_config_profile = {
4835         .used_max_mid                   = 1,
4836         .max_mid                        = MLXSW_SP_MID_MAX,
4837         .used_flood_tables              = 1,
4838         .used_flood_mode                = 1,
4839         .flood_mode                     = 3,
4840         .max_fid_flood_tables           = 3,
4841         .fid_flood_table_size           = MLXSW_SP_FID_FLOOD_TABLE_SIZE,
4842         .used_max_ib_mc                 = 1,
4843         .max_ib_mc                      = 0,
4844         .used_max_pkey                  = 1,
4845         .max_pkey                       = 0,
4846         .used_kvd_sizes                 = 1,
4847         .kvd_hash_single_parts          = 59,
4848         .kvd_hash_double_parts          = 41,
4849         .kvd_linear_size                = MLXSW_SP_KVD_LINEAR_SIZE,
4850         .swid_config                    = {
4851                 {
4852                         .used_type      = 1,
4853                         .type           = MLXSW_PORT_SWID_TYPE_ETH,
4854                 }
4855         },
4856 };
4857
4858 static const struct mlxsw_config_profile mlxsw_sp2_config_profile = {
4859         .used_max_mid                   = 1,
4860         .max_mid                        = MLXSW_SP_MID_MAX,
4861         .used_flood_tables              = 1,
4862         .used_flood_mode                = 1,
4863         .flood_mode                     = 3,
4864         .max_fid_flood_tables           = 3,
4865         .fid_flood_table_size           = MLXSW_SP_FID_FLOOD_TABLE_SIZE,
4866         .used_max_ib_mc                 = 1,
4867         .max_ib_mc                      = 0,
4868         .used_max_pkey                  = 1,
4869         .max_pkey                       = 0,
4870         .swid_config                    = {
4871                 {
4872                         .used_type      = 1,
4873                         .type           = MLXSW_PORT_SWID_TYPE_ETH,
4874                 }
4875         },
4876 };
4877
4878 static void
4879 mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core,
4880                                       struct devlink_resource_size_params *kvd_size_params,
4881                                       struct devlink_resource_size_params *linear_size_params,
4882                                       struct devlink_resource_size_params *hash_double_size_params,
4883                                       struct devlink_resource_size_params *hash_single_size_params)
4884 {
4885         u32 single_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
4886                                                  KVD_SINGLE_MIN_SIZE);
4887         u32 double_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
4888                                                  KVD_DOUBLE_MIN_SIZE);
4889         u32 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
4890         u32 linear_size_min = 0;
4891
4892         devlink_resource_size_params_init(kvd_size_params, kvd_size, kvd_size,
4893                                           MLXSW_SP_KVD_GRANULARITY,
4894                                           DEVLINK_RESOURCE_UNIT_ENTRY);
4895         devlink_resource_size_params_init(linear_size_params, linear_size_min,
4896                                           kvd_size - single_size_min -
4897                                           double_size_min,
4898                                           MLXSW_SP_KVD_GRANULARITY,
4899                                           DEVLINK_RESOURCE_UNIT_ENTRY);
4900         devlink_resource_size_params_init(hash_double_size_params,
4901                                           double_size_min,
4902                                           kvd_size - single_size_min -
4903                                           linear_size_min,
4904                                           MLXSW_SP_KVD_GRANULARITY,
4905                                           DEVLINK_RESOURCE_UNIT_ENTRY);
4906         devlink_resource_size_params_init(hash_single_size_params,
4907                                           single_size_min,
4908                                           kvd_size - double_size_min -
4909                                           linear_size_min,
4910                                           MLXSW_SP_KVD_GRANULARITY,
4911                                           DEVLINK_RESOURCE_UNIT_ENTRY);
4912 }
4913
4914 static int mlxsw_sp1_resources_kvd_register(struct mlxsw_core *mlxsw_core)
4915 {
4916         struct devlink *devlink = priv_to_devlink(mlxsw_core);
4917         struct devlink_resource_size_params hash_single_size_params;
4918         struct devlink_resource_size_params hash_double_size_params;
4919         struct devlink_resource_size_params linear_size_params;
4920         struct devlink_resource_size_params kvd_size_params;
4921         u32 kvd_size, single_size, double_size, linear_size;
4922         const struct mlxsw_config_profile *profile;
4923         int err;
4924
4925         profile = &mlxsw_sp1_config_profile;
4926         if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
4927                 return -EIO;
4928
4929         mlxsw_sp_resource_size_params_prepare(mlxsw_core, &kvd_size_params,
4930                                               &linear_size_params,
4931                                               &hash_double_size_params,
4932                                               &hash_single_size_params);
4933
4934         kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
4935         err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
4936                                         kvd_size, MLXSW_SP_RESOURCE_KVD,
4937                                         DEVLINK_RESOURCE_ID_PARENT_TOP,
4938                                         &kvd_size_params);
4939         if (err)
4940                 return err;
4941
4942         linear_size = profile->kvd_linear_size;
4943         err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR,
4944                                         linear_size,
4945                                         MLXSW_SP_RESOURCE_KVD_LINEAR,
4946                                         MLXSW_SP_RESOURCE_KVD,
4947                                         &linear_size_params);
4948         if (err)
4949                 return err;
4950
4951         err = mlxsw_sp1_kvdl_resources_register(mlxsw_core);
4952         if (err)
4953                 return err;
4954
4955         double_size = kvd_size - linear_size;
4956         double_size *= profile->kvd_hash_double_parts;
4957         double_size /= profile->kvd_hash_double_parts +
4958                        profile->kvd_hash_single_parts;
4959         double_size = rounddown(double_size, MLXSW_SP_KVD_GRANULARITY);
4960         err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE,
4961                                         double_size,
4962                                         MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
4963                                         MLXSW_SP_RESOURCE_KVD,
4964                                         &hash_double_size_params);
4965         if (err)
4966                 return err;
4967
4968         single_size = kvd_size - double_size - linear_size;
4969         err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE,
4970                                         single_size,
4971                                         MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
4972                                         MLXSW_SP_RESOURCE_KVD,
4973                                         &hash_single_size_params);
4974         if (err)
4975                 return err;
4976
4977         return 0;
4978 }
4979
4980 static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core)
4981 {
4982         return mlxsw_sp1_resources_kvd_register(mlxsw_core);
4983 }
4984
4985 static int mlxsw_sp2_resources_register(struct mlxsw_core *mlxsw_core)
4986 {
4987         return 0;
4988 }
4989
4990 static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
4991                                   const struct mlxsw_config_profile *profile,
4992                                   u64 *p_single_size, u64 *p_double_size,
4993                                   u64 *p_linear_size)
4994 {
4995         struct devlink *devlink = priv_to_devlink(mlxsw_core);
4996         u32 double_size;
4997         int err;
4998
4999         if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
5000             !MLXSW_CORE_RES_VALID(mlxsw_core, KVD_DOUBLE_MIN_SIZE))
5001                 return -EIO;
5002
5003         /* The hash part is what is left of the KVD after the
5004          * linear part. It is split into the single size and
5005          * double size according to the parts ratio from the
5006          * profile. Both sizes must be multiples of the
5007          * granularity from the profile. In case the user
5008          * provided the sizes, they are obtained via devlink.
5009          */
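        /* For example, with hypothetical numbers: a 400K entry KVD, a 100K
         * linear part and a 41/59 double/single split give double =
         * (400K - 100K) * 41 / (41 + 59) = 123K (rounded down to the
         * granularity) and single = 400K - 123K - 100K = 177K.
         */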
5010         err = devlink_resource_size_get(devlink,
5011                                         MLXSW_SP_RESOURCE_KVD_LINEAR,
5012                                         p_linear_size);
5013         if (err)
5014                 *p_linear_size = profile->kvd_linear_size;
5015
5016         err = devlink_resource_size_get(devlink,
5017                                         MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
5018                                         p_double_size);
5019         if (err) {
5020                 double_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
5021                               *p_linear_size;
5022                 double_size *= profile->kvd_hash_double_parts;
5023                 double_size /= profile->kvd_hash_double_parts +
5024                                profile->kvd_hash_single_parts;
5025                 *p_double_size = rounddown(double_size,
5026                                            MLXSW_SP_KVD_GRANULARITY);
5027         }
5028
5029         err = devlink_resource_size_get(devlink,
5030                                         MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
5031                                         p_single_size);
5032         if (err)
5033                 *p_single_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
5034                                  *p_double_size - *p_linear_size;
5035
5036         /* Check results are legal. */
5037         if (*p_single_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
5038             *p_double_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE) ||
5039             MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) < *p_linear_size)
5040                 return -EIO;
5041
5042         return 0;
5043 }
5044
5045 static int
5046 mlxsw_sp_devlink_param_fw_load_policy_validate(struct devlink *devlink, u32 id,
5047                                                union devlink_param_value val,
5048                                                struct netlink_ext_ack *extack)
5049 {
5050         if ((val.vu8 != DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER) &&
5051             (val.vu8 != DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH)) {
5052                 NL_SET_ERR_MSG_MOD(extack, "'fw_load_policy' must be 'driver' or 'flash'");
5053                 return -EINVAL;
5054         }
5055
5056         return 0;
5057 }
5058
5059 static const struct devlink_param mlxsw_sp_devlink_params[] = {
5060         DEVLINK_PARAM_GENERIC(FW_LOAD_POLICY,
5061                               BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
5062                               NULL, NULL,
5063                               mlxsw_sp_devlink_param_fw_load_policy_validate),
5064 };
5065
5066 static int mlxsw_sp_params_register(struct mlxsw_core *mlxsw_core)
5067 {
5068         struct devlink *devlink = priv_to_devlink(mlxsw_core);
5069         union devlink_param_value value;
5070         int err;
5071
5072         err = devlink_params_register(devlink, mlxsw_sp_devlink_params,
5073                                       ARRAY_SIZE(mlxsw_sp_devlink_params));
5074         if (err)
5075                 return err;
5076
5077         value.vu8 = DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER;
5078         devlink_param_driverinit_value_set(devlink,
5079                                            DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY,
5080                                            value);
5081         return 0;
5082 }
5083
5084 static void mlxsw_sp_params_unregister(struct mlxsw_core *mlxsw_core)
5085 {
5086         devlink_params_unregister(priv_to_devlink(mlxsw_core),
5087                                   mlxsw_sp_devlink_params,
5088                                   ARRAY_SIZE(mlxsw_sp_devlink_params));
5089 }
5090
5091 static int
5092 mlxsw_sp_params_acl_region_rehash_intrvl_get(struct devlink *devlink, u32 id,
5093                                              struct devlink_param_gset_ctx *ctx)
5094 {
5095         struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
5096         struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
5097
5098         ctx->val.vu32 = mlxsw_sp_acl_region_rehash_intrvl_get(mlxsw_sp);
5099         return 0;
5100 }
5101
5102 static int
5103 mlxsw_sp_params_acl_region_rehash_intrvl_set(struct devlink *devlink, u32 id,
5104                                              struct devlink_param_gset_ctx *ctx)
5105 {
5106         struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
5107         struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
5108
5109         return mlxsw_sp_acl_region_rehash_intrvl_set(mlxsw_sp, ctx->val.vu32);
5110 }
5111
5112 static const struct devlink_param mlxsw_sp2_devlink_params[] = {
5113         DEVLINK_PARAM_DRIVER(MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL,
5114                              "acl_region_rehash_interval",
5115                              DEVLINK_PARAM_TYPE_U32,
5116                              BIT(DEVLINK_PARAM_CMODE_RUNTIME),
5117                              mlxsw_sp_params_acl_region_rehash_intrvl_get,
5118                              mlxsw_sp_params_acl_region_rehash_intrvl_set,
5119                              NULL),
5120 };
5121
5122 static int mlxsw_sp2_params_register(struct mlxsw_core *mlxsw_core)
5123 {
5124         struct devlink *devlink = priv_to_devlink(mlxsw_core);
5125         union devlink_param_value value;
5126         int err;
5127
5128         err = mlxsw_sp_params_register(mlxsw_core);
5129         if (err)
5130                 return err;
5131
5132         err = devlink_params_register(devlink, mlxsw_sp2_devlink_params,
5133                                       ARRAY_SIZE(mlxsw_sp2_devlink_params));
5134         if (err)
5135                 goto err_devlink_params_register;
5136
5137         value.vu32 = 0;
5138         devlink_param_driverinit_value_set(devlink,
5139                                            MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL,
5140                                            value);
5141         return 0;
5142
5143 err_devlink_params_register:
5144         mlxsw_sp_params_unregister(mlxsw_core);
5145         return err;
5146 }
5147
5148 static void mlxsw_sp2_params_unregister(struct mlxsw_core *mlxsw_core)
5149 {
5150         devlink_params_unregister(priv_to_devlink(mlxsw_core),
5151                                   mlxsw_sp2_devlink_params,
5152                                   ARRAY_SIZE(mlxsw_sp2_devlink_params));
5153         mlxsw_sp_params_unregister(mlxsw_core);
5154 }
5155
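/* Called for PTP packets after transmission: strip the Tx header that was
 * prepended for the device and hand the skb to the per-ASIC PTP
 * "transmitted" handler.
 */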
5156 static void mlxsw_sp_ptp_transmitted(struct mlxsw_core *mlxsw_core,
5157                                      struct sk_buff *skb, u8 local_port)
5158 {
5159         struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
5160
5161         skb_pull(skb, MLXSW_TXHDR_LEN);
5162         mlxsw_sp->ptp_ops->transmitted(mlxsw_sp, skb, local_port);
5163 }
5164
5165 static struct mlxsw_driver mlxsw_sp1_driver = {
5166         .kind                           = mlxsw_sp1_driver_name,
5167         .priv_size                      = sizeof(struct mlxsw_sp),
5168         .init                           = mlxsw_sp1_init,
5169         .fini                           = mlxsw_sp_fini,
5170         .basic_trap_groups_set          = mlxsw_sp_basic_trap_groups_set,
5171         .port_split                     = mlxsw_sp_port_split,
5172         .port_unsplit                   = mlxsw_sp_port_unsplit,
5173         .sb_pool_get                    = mlxsw_sp_sb_pool_get,
5174         .sb_pool_set                    = mlxsw_sp_sb_pool_set,
5175         .sb_port_pool_get               = mlxsw_sp_sb_port_pool_get,
5176         .sb_port_pool_set               = mlxsw_sp_sb_port_pool_set,
5177         .sb_tc_pool_bind_get            = mlxsw_sp_sb_tc_pool_bind_get,
5178         .sb_tc_pool_bind_set            = mlxsw_sp_sb_tc_pool_bind_set,
5179         .sb_occ_snapshot                = mlxsw_sp_sb_occ_snapshot,
5180         .sb_occ_max_clear               = mlxsw_sp_sb_occ_max_clear,
5181         .sb_occ_port_pool_get           = mlxsw_sp_sb_occ_port_pool_get,
5182         .sb_occ_tc_port_bind_get        = mlxsw_sp_sb_occ_tc_port_bind_get,
5183         .flash_update                   = mlxsw_sp_flash_update,
5184         .txhdr_construct                = mlxsw_sp_txhdr_construct,
5185         .resources_register             = mlxsw_sp1_resources_register,
5186         .kvd_sizes_get                  = mlxsw_sp_kvd_sizes_get,
5187         .params_register                = mlxsw_sp_params_register,
5188         .params_unregister              = mlxsw_sp_params_unregister,
5189         .ptp_transmitted                = mlxsw_sp_ptp_transmitted,
5190         .txhdr_len                      = MLXSW_TXHDR_LEN,
5191         .profile                        = &mlxsw_sp1_config_profile,
5192         .res_query_enabled              = true,
5193 };
5194
5195 static struct mlxsw_driver mlxsw_sp2_driver = {
5196         .kind                           = mlxsw_sp2_driver_name,
5197         .priv_size                      = sizeof(struct mlxsw_sp),
5198         .init                           = mlxsw_sp2_init,
5199         .fini                           = mlxsw_sp_fini,
5200         .basic_trap_groups_set          = mlxsw_sp_basic_trap_groups_set,
5201         .port_split                     = mlxsw_sp_port_split,
5202         .port_unsplit                   = mlxsw_sp_port_unsplit,
5203         .sb_pool_get                    = mlxsw_sp_sb_pool_get,
5204         .sb_pool_set                    = mlxsw_sp_sb_pool_set,
5205         .sb_port_pool_get               = mlxsw_sp_sb_port_pool_get,
5206         .sb_port_pool_set               = mlxsw_sp_sb_port_pool_set,
5207         .sb_tc_pool_bind_get            = mlxsw_sp_sb_tc_pool_bind_get,
5208         .sb_tc_pool_bind_set            = mlxsw_sp_sb_tc_pool_bind_set,
5209         .sb_occ_snapshot                = mlxsw_sp_sb_occ_snapshot,
5210         .sb_occ_max_clear               = mlxsw_sp_sb_occ_max_clear,
5211         .sb_occ_port_pool_get           = mlxsw_sp_sb_occ_port_pool_get,
5212         .sb_occ_tc_port_bind_get        = mlxsw_sp_sb_occ_tc_port_bind_get,
5213         .flash_update                   = mlxsw_sp_flash_update,
5214         .txhdr_construct                = mlxsw_sp_txhdr_construct,
5215         .resources_register             = mlxsw_sp2_resources_register,
5216         .params_register                = mlxsw_sp2_params_register,
5217         .params_unregister              = mlxsw_sp2_params_unregister,
5218         .ptp_transmitted                = mlxsw_sp_ptp_transmitted,
5219         .txhdr_len                      = MLXSW_TXHDR_LEN,
5220         .profile                        = &mlxsw_sp2_config_profile,
5221         .res_query_enabled              = true,
5222 };
5223
5224 bool mlxsw_sp_port_dev_check(const struct net_device *dev)
5225 {
5226         return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
5227 }
5228
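/* Callback for netdev_walk_all_lower_dev{,_rcu}(): record the first lower
 * device that is an mlxsw_sp port and return 1 to stop the walk.
 */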
5229 static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev, void *data)
5230 {
5231         struct mlxsw_sp_port **p_mlxsw_sp_port = data;
5232         int ret = 0;
5233
5234         if (mlxsw_sp_port_dev_check(lower_dev)) {
5235                 *p_mlxsw_sp_port = netdev_priv(lower_dev);
5236                 ret = 1;
5237         }
5238
5239         return ret;
5240 }
5241
5242 struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
5243 {
5244         struct mlxsw_sp_port *mlxsw_sp_port;
5245
5246         if (mlxsw_sp_port_dev_check(dev))
5247                 return netdev_priv(dev);
5248
5249         mlxsw_sp_port = NULL;
5250         netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &mlxsw_sp_port);
5251
5252         return mlxsw_sp_port;
5253 }
5254
5255 struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
5256 {
5257         struct mlxsw_sp_port *mlxsw_sp_port;
5258
5259         mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
5260         return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL;
5261 }
5262
5263 struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
5264 {
5265         struct mlxsw_sp_port *mlxsw_sp_port;
5266
5267         if (mlxsw_sp_port_dev_check(dev))
5268                 return netdev_priv(dev);
5269
5270         mlxsw_sp_port = NULL;
5271         netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk,
5272                                       &mlxsw_sp_port);
5273
5274         return mlxsw_sp_port;
5275 }
5276
5277 struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev)
5278 {
5279         struct mlxsw_sp_port *mlxsw_sp_port;
5280
5281         rcu_read_lock();
5282         mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev);
5283         if (mlxsw_sp_port)
5284                 dev_hold(mlxsw_sp_port->dev);
5285         rcu_read_unlock();
5286         return mlxsw_sp_port;
5287 }
5288
5289 void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port)
5290 {
5291         dev_put(mlxsw_sp_port->dev);
5292 }
5293
5294 static void
5295 mlxsw_sp_port_lag_uppers_cleanup(struct mlxsw_sp_port *mlxsw_sp_port,
5296                                  struct net_device *lag_dev)
5297 {
5298         struct net_device *br_dev = netdev_master_upper_dev_get(lag_dev);
5299         struct net_device *upper_dev;
5300         struct list_head *iter;
5301
5302         if (netif_is_bridge_port(lag_dev))
5303                 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lag_dev, br_dev);
5304
5305         netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
5306                 if (!netif_is_bridge_port(upper_dev))
5307                         continue;
5308                 br_dev = netdev_master_upper_dev_get(upper_dev);
5309                 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, br_dev);
5310         }
5311 }
5312
5313 static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
5314 {
5315         char sldr_pl[MLXSW_REG_SLDR_LEN];
5316
5317         mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
5318         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
5319 }
5320
5321 static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
5322 {
5323         char sldr_pl[MLXSW_REG_SLDR_LEN];
5324
5325         mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
5326         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
5327 }
5328
5329 static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
5330                                      u16 lag_id, u8 port_index)
5331 {
5332         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
5333         char slcor_pl[MLXSW_REG_SLCOR_LEN];
5334
5335         mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
5336                                       lag_id, port_index);
5337         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
5338 }
5339
5340 static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
5341                                         u16 lag_id)
5342 {
5343         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
5344         char slcor_pl[MLXSW_REG_SLCOR_LEN];
5345
5346         mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
5347                                          lag_id);
5348         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
5349 }
5350
5351 static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
5352                                         u16 lag_id)
5353 {
5354         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
5355         char slcor_pl[MLXSW_REG_SLCOR_LEN];
5356
5357         mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
5358                                         lag_id);
5359         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
5360 }
5361
5362 static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
5363                                          u16 lag_id)
5364 {
5365         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
5366         char slcor_pl[MLXSW_REG_SLCOR_LEN];
5367
5368         mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
5369                                          lag_id);
5370         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
5371 }
5372
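/* Map 'lag_dev' to a LAG index: reuse an index already referencing this
 * device, otherwise hand out the first free one. Returns -EBUSY when all
 * MAX_LAG indexes are in use.
 */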
5373 static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
5374                                   struct net_device *lag_dev,
5375                                   u16 *p_lag_id)
5376 {
5377         struct mlxsw_sp_upper *lag;
5378         int free_lag_id = -1;
5379         u64 max_lag;
5380         int i;
5381
5382         max_lag = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG);
5383         for (i = 0; i < max_lag; i++) {
5384                 lag = mlxsw_sp_lag_get(mlxsw_sp, i);
5385                 if (lag->ref_count) {
5386                         if (lag->dev == lag_dev) {
5387                                 *p_lag_id = i;
5388                                 return 0;
5389                         }
5390                 } else if (free_lag_id < 0) {
5391                         free_lag_id = i;
5392                 }
5393         }
5394         if (free_lag_id < 0)
5395                 return -EBUSY;
5396         *p_lag_id = free_lag_id;
5397         return 0;
5398 }
5399
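/* Check whether the LAG master can be offloaded: a free (or matching) LAG
 * index must be available and the LAG must use hash-based Tx.
 */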
5400 static bool
5401 mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
5402                           struct net_device *lag_dev,
5403                           struct netdev_lag_upper_info *lag_upper_info,
5404                           struct netlink_ext_ack *extack)
5405 {
5406         u16 lag_id;
5407
5408         if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0) {
5409                 NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported LAG devices");
5410                 return false;
5411         }
5412         if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
5413                 NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type");
5414                 return false;
5415         }
5416         return true;
5417 }
5418
5419 static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
5420                                        u16 lag_id, u8 *p_port_index)
5421 {
5422         u64 max_lag_members;
5423         int i;
5424
5425         max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
5426                                              MAX_LAG_MEMBERS);
5427         for (i = 0; i < max_lag_members; i++) {
5428                 if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
5429                         *p_port_index = i;
5430                         return 0;
5431                 }
5432         }
5433         return -EBUSY;
5434 }
5435
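/* Add the port to the LAG in hardware, creating the LAG on first use, and
 * record the port to LAG mapping in the core.
 */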
5436 static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
5437                                   struct net_device *lag_dev)
5438 {
5439         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
5440         struct mlxsw_sp_upper *lag;
5441         u16 lag_id;
5442         u8 port_index;
5443         int err;
5444
5445         err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
5446         if (err)
5447                 return err;
5448         lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
5449         if (!lag->ref_count) {
5450                 err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
5451                 if (err)
5452                         return err;
5453                 lag->dev = lag_dev;
5454         }
5455
5456         err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
5457         if (err)
5458                 return err;
5459         err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
5460         if (err)
5461                 goto err_col_port_add;
5462
5463         mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
5464                                    mlxsw_sp_port->local_port);
5465         mlxsw_sp_port->lag_id = lag_id;
5466         mlxsw_sp_port->lagged = 1;
5467         lag->ref_count++;
5468
5469         /* Port is no longer usable as a router interface */
5470         if (mlxsw_sp_port->default_vlan->fid)
5471                 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port->default_vlan);
5472
5473         return 0;
5474
5475 err_col_port_add:
5476         if (!lag->ref_count)
5477                 mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
5478         return err;
5479 }
5480
5481 static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
5482                                     struct net_device *lag_dev)
5483 {
5484         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
5485         u16 lag_id = mlxsw_sp_port->lag_id;
5486         struct mlxsw_sp_upper *lag;
5487
5488         if (!mlxsw_sp_port->lagged)
5489                 return;
5490         lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
5491         WARN_ON(lag->ref_count == 0);
5492
5493         mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
5494
5495         /* Any VLANs configured on the port are no longer valid */
5496         mlxsw_sp_port_vlan_flush(mlxsw_sp_port, false);
5497         mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port->default_vlan);
5498         /* Make the LAG and its directly linked uppers leave bridges they
5499          * are members of
5500          */
5501         mlxsw_sp_port_lag_uppers_cleanup(mlxsw_sp_port, lag_dev);
5502
5503         if (lag->ref_count == 1)
5504                 mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
5505
5506         mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
5507                                      mlxsw_sp_port->local_port);
5508         mlxsw_sp_port->lagged = 0;
5509         lag->ref_count--;
5510
5511         /* Make sure untagged frames are allowed to ingress */
5512         mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID);
5513 }
5514
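/* The SLDR distribution list controls which LAG member ports are used for
 * egress traffic; ports are only added to it once Tx is enabled on them.
 */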
5515 static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
5516                                       u16 lag_id)
5517 {
5518         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
5519         char sldr_pl[MLXSW_REG_SLDR_LEN];
5520
5521         mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
5522                                          mlxsw_sp_port->local_port);
5523         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
5524 }
5525
5526 static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
5527                                          u16 lag_id)
5528 {
5529         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
5530         char sldr_pl[MLXSW_REG_SLDR_LEN];
5531
5532         mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
5533                                             mlxsw_sp_port->local_port);
5534         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
5535 }
5536
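/* Called when the bonding driver reports Tx enabled on the port: enable
 * collection and add the port to the LAG's distribution list.
 */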
5537 static int
5538 mlxsw_sp_port_lag_col_dist_enable(struct mlxsw_sp_port *mlxsw_sp_port)
5539 {
5540         int err;
5541
5542         err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port,
5543                                            mlxsw_sp_port->lag_id);
5544         if (err)
5545                 return err;
5546
5547         err = mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id);
5548         if (err)
5549                 goto err_dist_port_add;
5550
5551         return 0;
5552
5553 err_dist_port_add:
5554         mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, mlxsw_sp_port->lag_id);
5555         return err;
5556 }
5557
5558 static int
5559 mlxsw_sp_port_lag_col_dist_disable(struct mlxsw_sp_port *mlxsw_sp_port)
5560 {
5561         int err;
5562
5563         err = mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
5564                                             mlxsw_sp_port->lag_id);
5565         if (err)
5566                 return err;
5567
5568         err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port,
5569                                             mlxsw_sp_port->lag_id);
5570         if (err)
5571                 goto err_col_port_disable;
5572
5573         return 0;
5574
5575 err_col_port_disable:
5576         mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id);
5577         return err;
5578 }
5579
5580 static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
5581                                      struct netdev_lag_lower_state_info *info)
5582 {
5583         if (info->tx_enabled)
5584                 return mlxsw_sp_port_lag_col_dist_enable(mlxsw_sp_port);
5585         else
5586                 return mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
5587 }
5588
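/* Set the STP state of all VLANs on the port to either forwarding or
 * discarding using a single SPMS register write.
 */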
5589 static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
5590                                  bool enable)
5591 {
5592         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
5593         enum mlxsw_reg_spms_state spms_state;
5594         char *spms_pl;
5595         u16 vid;
5596         int err;
5597
5598         spms_state = enable ? MLXSW_REG_SPMS_STATE_FORWARDING :
5599                               MLXSW_REG_SPMS_STATE_DISCARDING;
5600
5601         spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
5602         if (!spms_pl)
5603                 return -ENOMEM;
5604         mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
5605
5606         for (vid = 0; vid < VLAN_N_VID; vid++)
5607                 mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
5608
5609         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
5610         kfree(spms_pl);
5611         return err;
5612 }
5613
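/* Prepare the port for an OVS master: switch to virtual port mode, set all
 * VLANs to forwarding, add VLANs 1-4094 and disable learning on them.
 */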
5614 static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port)
5615 {
5616         u16 vid = 1;
5617         int err;
5618
5619         err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
5620         if (err)
5621                 return err;
5622         err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true);
5623         if (err)
5624                 goto err_port_stp_set;
5625         err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2,
5626                                      true, false);
5627         if (err)
5628                 goto err_port_vlan_set;
5629
5630         for (; vid <= VLAN_N_VID - 1; vid++) {
5631                 err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
5632                                                      vid, false);
5633                 if (err)
5634                         goto err_vid_learning_set;
5635         }
5636
5637         return 0;
5638
5639 err_vid_learning_set:
5640         for (vid--; vid >= 1; vid--)
5641                 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
5642 err_port_vlan_set:
5643         mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
5644 err_port_stp_set:
5645         mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
5646         return err;
5647 }
5648
5649 static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port)
5650 {
5651         u16 vid;
5652
5653         for (vid = VLAN_N_VID - 1; vid >= 1; vid--)
5654                 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
5655                                                vid, true);
5656
5657         mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2,
5658                                false, false);
5659         mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
5660         mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
5661 }
5662
5663 static bool mlxsw_sp_bridge_has_multiple_vxlans(struct net_device *br_dev)
5664 {
5665         unsigned int num_vxlans = 0;
5666         struct net_device *dev;
5667         struct list_head *iter;
5668
5669         netdev_for_each_lower_dev(br_dev, dev, iter) {
5670                 if (netif_is_vxlan(dev))
5671                         num_vxlans++;
5672         }
5673
5674         return num_vxlans > 1;
5675 }
5676
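/* In a VLAN-aware bridge each offloaded VxLAN device must have a unique
 * PVID; return false if two VxLAN lowers map to the same PVID.
 */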
5677 static bool mlxsw_sp_bridge_vxlan_vlan_is_valid(struct net_device *br_dev)
5678 {
5679         DECLARE_BITMAP(vlans, VLAN_N_VID) = {0};
5680         struct net_device *dev;
5681         struct list_head *iter;
5682
5683         netdev_for_each_lower_dev(br_dev, dev, iter) {
5684                 u16 pvid;
5685                 int err;
5686
5687                 if (!netif_is_vxlan(dev))
5688                         continue;
5689
5690                 err = mlxsw_sp_vxlan_mapped_vid(dev, &pvid);
5691                 if (err || !pvid)
5692                         continue;
5693
5694                 if (test_and_set_bit(pvid, vlans))
5695                         return false;
5696         }
5697
5698         return true;
5699 }
5700
5701 static bool mlxsw_sp_bridge_vxlan_is_valid(struct net_device *br_dev,
5702                                            struct netlink_ext_ack *extack)
5703 {
5704         if (br_multicast_enabled(br_dev)) {
5705                 NL_SET_ERR_MSG_MOD(extack, "Multicast can not be enabled on a bridge with a VxLAN device");
5706                 return false;
5707         }
5708
5709         if (!br_vlan_enabled(br_dev) &&
5710             mlxsw_sp_bridge_has_multiple_vxlans(br_dev)) {
5711                 NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices are not supported in a VLAN-unaware bridge");
5712                 return false;
5713         }
5714
5715         if (br_vlan_enabled(br_dev) &&
5716             !mlxsw_sp_bridge_vxlan_vlan_is_valid(br_dev)) {
5717                 NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices cannot have the same VLAN as PVID and egress untagged");
5718                 return false;
5719         }
5720
5721         return true;
5722 }
5723
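/* Validate (PRECHANGEUPPER) and reflect (CHANGEUPPER) changes to the upper
 * devices of a physical port: bridge, LAG, OVS, macvlan and VLAN uppers.
 */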
5724 static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
5725                                                struct net_device *dev,
5726                                                unsigned long event, void *ptr)
5727 {
5728         struct netdev_notifier_changeupper_info *info;
5729         struct mlxsw_sp_port *mlxsw_sp_port;
5730         struct netlink_ext_ack *extack;
5731         struct net_device *upper_dev;
5732         struct mlxsw_sp *mlxsw_sp;
5733         int err = 0;
5734
5735         mlxsw_sp_port = netdev_priv(dev);
5736         mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
5737         info = ptr;
5738         extack = netdev_notifier_info_to_extack(&info->info);
5739
5740         switch (event) {
5741         case NETDEV_PRECHANGEUPPER:
5742                 upper_dev = info->upper_dev;
5743                 if (!is_vlan_dev(upper_dev) &&
5744                     !netif_is_lag_master(upper_dev) &&
5745                     !netif_is_bridge_master(upper_dev) &&
5746                     !netif_is_ovs_master(upper_dev) &&
5747                     !netif_is_macvlan(upper_dev)) {
5748                         NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
5749                         return -EINVAL;
5750                 }
5751                 if (!info->linking)
5752                         break;
5753                 if (netif_is_bridge_master(upper_dev) &&
5754                     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) &&
5755                     mlxsw_sp_bridge_has_vxlan(upper_dev) &&
5756                     !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
5757                         return -EOPNOTSUPP;
5758                 if (netdev_has_any_upper_dev(upper_dev) &&
5759                     (!netif_is_bridge_master(upper_dev) ||
5760                      !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
5761                                                           upper_dev))) {
5762                         NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported");
5763                         return -EINVAL;
5764                 }
5765                 if (netif_is_lag_master(upper_dev) &&
5766                     !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
5767                                                info->upper_info, extack))
5768                         return -EINVAL;
5769                 if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev)) {
5770                         NL_SET_ERR_MSG_MOD(extack, "Master device is a LAG master and this device has a VLAN");
5771                         return -EINVAL;
5772                 }
5773                 if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
5774                     !netif_is_lag_master(vlan_dev_real_dev(upper_dev))) {
5775                         NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on a LAG port");
5776                         return -EINVAL;
5777                 }
5778                 if (netif_is_macvlan(upper_dev) &&
5779                     !mlxsw_sp_rif_find_by_dev(mlxsw_sp, lower_dev)) {
5780                         NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
5781                         return -EOPNOTSUPP;
5782                 }
5783                 if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev)) {
5784                         NL_SET_ERR_MSG_MOD(extack, "Master device is an OVS master and this device has a VLAN");
5785                         return -EINVAL;
5786                 }
5787                 if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev)) {
5788                         NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on an OVS port");
5789                         return -EINVAL;
5790                 }
5791                 break;
5792         case NETDEV_CHANGEUPPER:
5793                 upper_dev = info->upper_dev;
5794                 if (netif_is_bridge_master(upper_dev)) {
5795                         if (info->linking)
5796                                 err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
5797                                                                 lower_dev,
5798                                                                 upper_dev,
5799                                                                 extack);
5800                         else
5801                                 mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
5802                                                            lower_dev,
5803                                                            upper_dev);
5804                 } else if (netif_is_lag_master(upper_dev)) {
5805                         if (info->linking) {
5806                                 err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
5807                                                              upper_dev);
5808                         } else {
5809                                 mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
5810                                 mlxsw_sp_port_lag_leave(mlxsw_sp_port,
5811                                                         upper_dev);
5812                         }
5813                 } else if (netif_is_ovs_master(upper_dev)) {
5814                         if (info->linking)
5815                                 err = mlxsw_sp_port_ovs_join(mlxsw_sp_port);
5816                         else
5817                                 mlxsw_sp_port_ovs_leave(mlxsw_sp_port);
5818                 } else if (netif_is_macvlan(upper_dev)) {
5819                         if (!info->linking)
5820                                 mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
5821                 } else if (is_vlan_dev(upper_dev)) {
5822                         struct net_device *br_dev;
5823
5824                         if (!netif_is_bridge_port(upper_dev))
5825                                 break;
5826                         if (info->linking)
5827                                 break;
5828                         br_dev = netdev_master_upper_dev_get(upper_dev);
5829                         mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev,
5830                                                    br_dev);
5831                 }
5832                 break;
5833         }
5834
5835         return err;
5836 }
5837
5838 static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
5839                                                unsigned long event, void *ptr)
5840 {
5841         struct netdev_notifier_changelowerstate_info *info;
5842         struct mlxsw_sp_port *mlxsw_sp_port;
5843         int err;
5844
5845         mlxsw_sp_port = netdev_priv(dev);
5846         info = ptr;
5847
5848         switch (event) {
5849         case NETDEV_CHANGELOWERSTATE:
5850                 if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
5851                         err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
5852                                                         info->lower_state_info);
5853                         if (err)
5854                                 netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
5855                 }
5856                 break;
5857         }
5858
5859         return 0;
5860 }
5861
5862 static int mlxsw_sp_netdevice_port_event(struct net_device *lower_dev,
5863                                          struct net_device *port_dev,
5864                                          unsigned long event, void *ptr)
5865 {
5866         switch (event) {
5867         case NETDEV_PRECHANGEUPPER:
5868         case NETDEV_CHANGEUPPER:
5869                 return mlxsw_sp_netdevice_port_upper_event(lower_dev, port_dev,
5870                                                            event, ptr);
5871         case NETDEV_CHANGELOWERSTATE:
5872                 return mlxsw_sp_netdevice_port_lower_event(port_dev, event,
5873                                                            ptr);
5874         }
5875
5876         return 0;
5877 }
5878
5879 static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
5880                                         unsigned long event, void *ptr)
5881 {
5882         struct net_device *dev;
5883         struct list_head *iter;
5884         int ret;
5885
5886         netdev_for_each_lower_dev(lag_dev, dev, iter) {
5887                 if (mlxsw_sp_port_dev_check(dev)) {
5888                         ret = mlxsw_sp_netdevice_port_event(lag_dev, dev, event,
5889                                                             ptr);
5890                         if (ret)
5891                                 return ret;
5892                 }
5893         }
5894
5895         return 0;
5896 }
5897
5898 static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
5899                                               struct net_device *dev,
5900                                               unsigned long event, void *ptr,
5901                                               u16 vid)
5902 {
5903         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
5904         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
5905         struct netdev_notifier_changeupper_info *info = ptr;
5906         struct netlink_ext_ack *extack;
5907         struct net_device *upper_dev;
5908         int err = 0;
5909
5910         extack = netdev_notifier_info_to_extack(&info->info);
5911
5912         switch (event) {
5913         case NETDEV_PRECHANGEUPPER:
5914                 upper_dev = info->upper_dev;
5915                 if (!netif_is_bridge_master(upper_dev) &&
5916                     !netif_is_macvlan(upper_dev)) {
5917                         NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
5918                         return -EINVAL;
5919                 }
5920                 if (!info->linking)
5921                         break;
5922                 if (netif_is_bridge_master(upper_dev) &&
5923                     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) &&
5924                     mlxsw_sp_bridge_has_vxlan(upper_dev) &&
5925                     !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
5926                         return -EOPNOTSUPP;
5927                 if (netdev_has_any_upper_dev(upper_dev) &&
5928                     (!netif_is_bridge_master(upper_dev) ||
5929                      !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
5930                                                           upper_dev))) {
5931                         NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported");
5932                         return -EINVAL;
5933                 }
5934                 if (netif_is_macvlan(upper_dev) &&
5935                     !mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan_dev)) {
5936                         NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
5937                         return -EOPNOTSUPP;
5938                 }
5939                 break;
5940         case NETDEV_CHANGEUPPER:
5941                 upper_dev = info->upper_dev;
5942                 if (netif_is_bridge_master(upper_dev)) {
5943                         if (info->linking)
5944                                 err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
5945                                                                 vlan_dev,
5946                                                                 upper_dev,
5947                                                                 extack);
5948                         else
5949                                 mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
5950                                                            vlan_dev,
5951                                                            upper_dev);
5952                 } else if (netif_is_macvlan(upper_dev)) {
5953                         if (!info->linking)
5954                                 mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
5955                 } else {
5956                         err = -EINVAL;
5957                         WARN_ON(1);
5958                 }
5959                 break;
5960         }
5961
5962         return err;
5963 }
5964
5965 static int mlxsw_sp_netdevice_lag_port_vlan_event(struct net_device *vlan_dev,
5966                                                   struct net_device *lag_dev,
5967                                                   unsigned long event,
5968                                                   void *ptr, u16 vid)
5969 {
5970         struct net_device *dev;
5971         struct list_head *iter;
5972         int ret;
5973
5974         netdev_for_each_lower_dev(lag_dev, dev, iter) {
5975                 if (mlxsw_sp_port_dev_check(dev)) {
5976                         ret = mlxsw_sp_netdevice_port_vlan_event(vlan_dev, dev,
5977                                                                  event, ptr,
5978                                                                  vid);
5979                         if (ret)
5980                                 return ret;
5981                 }
5982         }
5983
5984         return 0;
5985 }
5986
5987 static int mlxsw_sp_netdevice_bridge_vlan_event(struct net_device *vlan_dev,
5988                                                 struct net_device *br_dev,
5989                                                 unsigned long event, void *ptr,
5990                                                 u16 vid)
5991 {
5992         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev);
5993         struct netdev_notifier_changeupper_info *info = ptr;
5994         struct netlink_ext_ack *extack;
5995         struct net_device *upper_dev;
5996
5997         if (!mlxsw_sp)
5998                 return 0;
5999
6000         extack = netdev_notifier_info_to_extack(&info->info);
6001
6002         switch (event) {
6003         case NETDEV_PRECHANGEUPPER:
6004                 upper_dev = info->upper_dev;
6005                 if (!netif_is_macvlan(upper_dev)) {
6006                         NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
6007                         return -EOPNOTSUPP;
6008                 }
6009                 if (!info->linking)
6010                         break;
6011                 if (netif_is_macvlan(upper_dev) &&
6012                     !mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan_dev)) {
6013                         NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
6014                         return -EOPNOTSUPP;
6015                 }
6016                 break;
6017         case NETDEV_CHANGEUPPER:
6018                 upper_dev = info->upper_dev;
6019                 if (info->linking)
6020                         break;
6021                 if (netif_is_macvlan(upper_dev))
6022                         mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
6023                 break;
6024         }
6025
6026         return 0;
6027 }
6028
6029 static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
6030                                          unsigned long event, void *ptr)
6031 {
6032         struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
6033         u16 vid = vlan_dev_vlan_id(vlan_dev);
6034
6035         if (mlxsw_sp_port_dev_check(real_dev))
6036                 return mlxsw_sp_netdevice_port_vlan_event(vlan_dev, real_dev,
6037                                                           event, ptr, vid);
6038         else if (netif_is_lag_master(real_dev))
6039                 return mlxsw_sp_netdevice_lag_port_vlan_event(vlan_dev,
6040                                                               real_dev, event,
6041                                                               ptr, vid);
6042         else if (netif_is_bridge_master(real_dev))
6043                 return mlxsw_sp_netdevice_bridge_vlan_event(vlan_dev, real_dev,
6044                                                             event, ptr, vid);
6045
6046         return 0;
6047 }
6048
6049 static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
6050                                            unsigned long event, void *ptr)
6051 {
6052         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(br_dev);
6053         struct netdev_notifier_changeupper_info *info = ptr;
6054         struct netlink_ext_ack *extack;
6055         struct net_device *upper_dev;
6056
6057         if (!mlxsw_sp)
6058                 return 0;
6059
6060         extack = netdev_notifier_info_to_extack(&info->info);
6061
6062         switch (event) {
6063         case NETDEV_PRECHANGEUPPER:
6064                 upper_dev = info->upper_dev;
6065                 if (!is_vlan_dev(upper_dev) && !netif_is_macvlan(upper_dev)) {
6066                         NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
6067                         return -EOPNOTSUPP;
6068                 }
6069                 if (!info->linking)
6070                         break;
6071                 if (netif_is_macvlan(upper_dev) &&
6072                     !mlxsw_sp_rif_find_by_dev(mlxsw_sp, br_dev)) {
6073                         NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
6074                         return -EOPNOTSUPP;
6075                 }
6076                 break;
6077         case NETDEV_CHANGEUPPER:
6078                 upper_dev = info->upper_dev;
6079                 if (info->linking)
6080                         break;
6081                 if (is_vlan_dev(upper_dev))
6082                         mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, upper_dev);
6083                 if (netif_is_macvlan(upper_dev))
6084                         mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
6085                 break;
6086         }
6087
6088         return 0;
6089 }
6090
6091 static int mlxsw_sp_netdevice_macvlan_event(struct net_device *macvlan_dev,
6092                                             unsigned long event, void *ptr)
6093 {
6094         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(macvlan_dev);
6095         struct netdev_notifier_changeupper_info *info = ptr;
6096         struct netlink_ext_ack *extack;
6097
6098         if (!mlxsw_sp || event != NETDEV_PRECHANGEUPPER)
6099                 return 0;
6100
6101         extack = netdev_notifier_info_to_extack(&info->info);
6102
6103         /* VRF enslavement is handled in mlxsw_sp_netdevice_vrf_event() */
6104         NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
6105
6106         return -EOPNOTSUPP;
6107 }
6108
6109 static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr)
6110 {
6111         struct netdev_notifier_changeupper_info *info = ptr;
6112
6113         if (event != NETDEV_PRECHANGEUPPER && event != NETDEV_CHANGEUPPER)
6114                 return false;
6115         return netif_is_l3_master(info->upper_dev);
6116 }
6117
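/* Join or leave hardware VxLAN offload when a VxLAN device is linked to an
 * offloaded bridge, brought up under one, or taken down.
 */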
6118 static int mlxsw_sp_netdevice_vxlan_event(struct mlxsw_sp *mlxsw_sp,
6119                                           struct net_device *dev,
6120                                           unsigned long event, void *ptr)
6121 {
6122         struct netdev_notifier_changeupper_info *cu_info;
6123         struct netdev_notifier_info *info = ptr;
6124         struct netlink_ext_ack *extack;
6125         struct net_device *upper_dev;
6126
6127         extack = netdev_notifier_info_to_extack(info);
6128
6129         switch (event) {
6130         case NETDEV_CHANGEUPPER:
6131                 cu_info = container_of(info,
6132                                        struct netdev_notifier_changeupper_info,
6133                                        info);
6134                 upper_dev = cu_info->upper_dev;
6135                 if (!netif_is_bridge_master(upper_dev))
6136                         return 0;
6137                 if (!mlxsw_sp_lower_get(upper_dev))
6138                         return 0;
6139                 if (!mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
6140                         return -EOPNOTSUPP;
6141                 if (cu_info->linking) {
6142                         if (!netif_running(dev))
6143                                 return 0;
6144                         /* When the bridge is VLAN-aware, the VNI of the VxLAN
6145                          * device needs to be mapped to a VLAN, but at this
6146                          * point no VLANs are configured on the VxLAN device
6147                          */
6148                         if (br_vlan_enabled(upper_dev))
6149                                 return 0;
6150                         return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev,
6151                                                           dev, 0, extack);
6152                 } else {
6153                         /* VLANs were already flushed, which triggered the
6154                          * necessary cleanup
6155                          */
6156                         if (br_vlan_enabled(upper_dev))
6157                                 return 0;
6158                         mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
6159                 }
6160                 break;
6161         case NETDEV_PRE_UP:
6162                 upper_dev = netdev_master_upper_dev_get(dev);
6163                 if (!upper_dev)
6164                         return 0;
6165                 if (!netif_is_bridge_master(upper_dev))
6166                         return 0;
6167                 if (!mlxsw_sp_lower_get(upper_dev))
6168                         return 0;
6169                 return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev, dev, 0,
6170                                                   extack);
6171         case NETDEV_DOWN:
6172                 upper_dev = netdev_master_upper_dev_get(dev);
6173                 if (!upper_dev)
6174                         return 0;
6175                 if (!netif_is_bridge_master(upper_dev))
6176                         return 0;
6177                 if (!mlxsw_sp_lower_get(upper_dev))
6178                         return 0;
6179                 mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
6180                 break;
6181         }
6182
6183         return 0;
6184 }
6185
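/* Top-level netdevice notifier: invalidate SPAN entries for unregistered
 * netdevs, re-resolve SPAN destinations and dispatch the event according
 * to the device type.
 */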
6186 static int mlxsw_sp_netdevice_event(struct notifier_block *nb,
6187                                     unsigned long event, void *ptr)
6188 {
6189         struct net_device *dev = netdev_notifier_info_to_dev(ptr);
6190         struct mlxsw_sp_span_entry *span_entry;
6191         struct mlxsw_sp *mlxsw_sp;
6192         int err = 0;
6193
6194         mlxsw_sp = container_of(nb, struct mlxsw_sp, netdevice_nb);
6195         if (event == NETDEV_UNREGISTER) {
6196                 span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, dev);
6197                 if (span_entry)
6198                         mlxsw_sp_span_entry_invalidate(mlxsw_sp, span_entry);
6199         }
6200         mlxsw_sp_span_respin(mlxsw_sp);
6201
6202         if (netif_is_vxlan(dev))
6203                 err = mlxsw_sp_netdevice_vxlan_event(mlxsw_sp, dev, event, ptr);
6204         if (mlxsw_sp_netdev_is_ipip_ol(mlxsw_sp, dev))
6205                 err = mlxsw_sp_netdevice_ipip_ol_event(mlxsw_sp, dev,
6206                                                        event, ptr);
6207         else if (mlxsw_sp_netdev_is_ipip_ul(mlxsw_sp, dev))
6208                 err = mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, dev,
6209                                                        event, ptr);
6210         else if (event == NETDEV_PRE_CHANGEADDR ||
6211                  event == NETDEV_CHANGEADDR ||
6212                  event == NETDEV_CHANGEMTU)
6213                 err = mlxsw_sp_netdevice_router_port_event(dev, event, ptr);
6214         else if (mlxsw_sp_is_vrf_event(event, ptr))
6215                 err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr);
6216         else if (mlxsw_sp_port_dev_check(dev))
6217                 err = mlxsw_sp_netdevice_port_event(dev, dev, event, ptr);
6218         else if (netif_is_lag_master(dev))
6219                 err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
6220         else if (is_vlan_dev(dev))
6221                 err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);
6222         else if (netif_is_bridge_master(dev))
6223                 err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr);
6224         else if (netif_is_macvlan(dev))
6225                 err = mlxsw_sp_netdevice_macvlan_event(dev, event, ptr);
6226
6227         return notifier_from_errno(err);
6228 }
6229
6230 static struct notifier_block mlxsw_sp_inetaddr_valid_nb __read_mostly = {
6231         .notifier_call = mlxsw_sp_inetaddr_valid_event,
6232 };
6233
6234 static struct notifier_block mlxsw_sp_inet6addr_valid_nb __read_mostly = {
6235         .notifier_call = mlxsw_sp_inet6addr_valid_event,
6236 };
6237
6238 static const struct pci_device_id mlxsw_sp1_pci_id_table[] = {
6239         {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0},
6240         {0, },
6241 };
6242
6243 static struct pci_driver mlxsw_sp1_pci_driver = {
6244         .name = mlxsw_sp1_driver_name,
6245         .id_table = mlxsw_sp1_pci_id_table,
6246 };
6247
6248 static const struct pci_device_id mlxsw_sp2_pci_id_table[] = {
6249         {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM2), 0},
6250         {0, },
6251 };
6252
6253 static struct pci_driver mlxsw_sp2_pci_driver = {
6254         .name = mlxsw_sp2_driver_name,
6255         .id_table = mlxsw_sp2_pci_id_table,
6256 };
6257
6258 static int __init mlxsw_sp_module_init(void)
6259 {
6260         int err;
6261
6262         register_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
6263         register_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
6264
6265         err = mlxsw_core_driver_register(&mlxsw_sp1_driver);
6266         if (err)
6267                 goto err_sp1_core_driver_register;
6268
6269         err = mlxsw_core_driver_register(&mlxsw_sp2_driver);
6270         if (err)
6271                 goto err_sp2_core_driver_register;
6272
6273         err = mlxsw_pci_driver_register(&mlxsw_sp1_pci_driver);
6274         if (err)
6275                 goto err_sp1_pci_driver_register;
6276
6277         err = mlxsw_pci_driver_register(&mlxsw_sp2_pci_driver);
6278         if (err)
6279                 goto err_sp2_pci_driver_register;
6280
6281         return 0;
6282
6283 err_sp2_pci_driver_register:
6284         mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
6285 err_sp1_pci_driver_register:
6286         mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
6287 err_sp2_core_driver_register:
6288         mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
6289 err_sp1_core_driver_register:
6290         unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
6291         unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
6292         return err;
6293 }
6294
6295 static void __exit mlxsw_sp_module_exit(void)
6296 {
6297         mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
6298         mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
6299         mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
6300         mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
6301         unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
6302         unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
6303 }
6304
6305 module_init(mlxsw_sp_module_init);
6306 module_exit(mlxsw_sp_module_exit);
6307
6308 MODULE_LICENSE("Dual BSD/GPL");
6309 MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
6310 MODULE_DESCRIPTION("Mellanox Spectrum driver");
6311 MODULE_DEVICE_TABLE(pci, mlxsw_sp1_pci_id_table);
6312 MODULE_DEVICE_TABLE(pci, mlxsw_sp2_pci_id_table);
6313 MODULE_FIRMWARE(MLXSW_SP1_FW_FILENAME);