Merge tag 'linux-can-next-for-5.13-20210414' of git://git.kernel.org/pub/scm/linux...
author David S. Miller <davem@davemloft.net>
Wed, 14 Apr 2021 21:37:02 +0000 (14:37 -0700)
committer David S. Miller <davem@davemloft.net>
Wed, 14 Apr 2021 21:37:02 +0000 (14:37 -0700)
Marc Kleine-Budde says:

====================
pull-request: can-next 2021-04-14

This is a pull request of a single patch for net-next/master.

Vincent Mailhol's patch fixes a NULL pointer dereference when handling
error frames in the etas_es58x driver, which was added in the previous
pull request.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
62 files changed:
Documentation/networking/device_drivers/ethernet/mellanox/mlx5.rst
drivers/atm/idt77252.c
drivers/net/Space.c
drivers/net/ethernet/freescale/dpaa2/Makefile
drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c [new file with mode: 0644]
drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h
drivers/net/ethernet/freescale/dpaa2/dpsw-cmd.h
drivers/net/ethernet/freescale/dpaa2/dpsw.c
drivers/net/ethernet/freescale/dpaa2/dpsw.h
drivers/net/ethernet/freescale/enetc/enetc_pf.c
drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
drivers/net/ethernet/intel/ixgbevf/vf.h
drivers/net/ethernet/mellanox/mlx5/core/Makefile
drivers/net/ethernet/mellanox/mlx5/core/devlink.c
drivers/net/ethernet/mellanox/mlx5/core/devlink.h
drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.h [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
drivers/net/ethernet/mellanox/mlx5/core/lag.c
drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
drivers/net/ethernet/mellanox/mlx5/core/rdma.c
drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
drivers/net/ethernet/realtek/r8169_main.c
drivers/net/ethernet/sfc/enum.h
drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
drivers/net/ethernet/stmicro/stmmac/hwif.h
drivers/net/ethernet/stmicro/stmmac/stmmac.h
drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
drivers/net/phy/marvell-88x2222.c
include/linux/mlx5/eswitch.h
include/linux/stmmac.h
net/bridge/br_multicast.c
net/bridge/br_netlink.c
net/bridge/br_private.h
net/bridge/br_sysfs_br.c
net/core/skbuff.c
net/ipv4/esp4.c
net/ipv6/ah6.c
net/ipv6/esp6.c
net/ipv6/esp6_offload.c
net/packet/af_packet.c
net/packet/internal.h

index 1b7e32d..936a10f 100644 (file)
@@ -183,6 +183,40 @@ User command examples:
       values:
          cmode driverinit value true
 
+esw_port_metadata: Eswitch port metadata state
+----------------------------------------------
+When applicable, disabling Eswitch metadata can increase packet rate by
+up to 20%, depending on the use case and packet sizes.
+
+The Eswitch port metadata state controls whether packets are internally
+tagged with metadata. Metadata tagging must be enabled for multi-port
+RoCE, for failover between representors, and for stacked devices.
+By default, metadata is enabled on supported devices in E-switch.
+Metadata is applicable only to the E-switch in switchdev mode, and
+users may disable it when none of the following use cases are in use:
+1. HCA in dual/multi-port RoCE mode.
+2. VF/SF representor bonding (usually used for live migration).
+3. Stacked devices.
+
+When metadata is disabled, the above use cases will fail to initialize
+if users try to enable them.
+
+- Show eswitch port metadata::
+
+    $ devlink dev param show pci/0000:06:00.0 name esw_port_metadata
+      pci/0000:06:00.0:
+        name esw_port_metadata type driver-specific
+          values:
+            cmode runtime value true
+
+- Disable eswitch port metadata::
+
+    $ devlink dev param set pci/0000:06:00.0 name esw_port_metadata value false cmode runtime
+
+- Change eswitch mode to switchdev mode after choosing the metadata value::
+
+    $ devlink dev eswitch set pci/0000:06:00.0 mode switchdev
+
 mlx5 subfunction
 ================
 mlx5 supports subfunction management using devlink port (see :ref:`Documentation/networking/devlink/devlink-port.rst <devlink_port>`) interface.
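
A possible end-to-end sequence for the new parameter (a sketch only: it reuses the PCI address from the examples above and moves the E-Switch to legacy mode first, since the validate callback rejects changing the value while in switchdev mode):

    $ devlink dev eswitch set pci/0000:06:00.0 mode legacy
    $ devlink dev param set pci/0000:06:00.0 name esw_port_metadata value false cmode runtime
    $ devlink dev eswitch set pci/0000:06:00.0 mode switchdev
    $ devlink dev param show pci/0000:06:00.0 name esw_port_metadata
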
index 0c13cac..9e4bd75 100644 (file)
@@ -1783,12 +1783,6 @@ set_tct(struct idt77252_dev *card, struct vc_map *vc)
 /*                                                                           */
 /*****************************************************************************/
 
-static __inline__ int
-idt77252_fbq_level(struct idt77252_dev *card, int queue)
-{
-       return (readl(SAR_REG_STAT) >> (16 + (queue << 2))) & 0x0f;
-}
-
 static __inline__ int
 idt77252_fbq_full(struct idt77252_dev *card, int queue)
 {
index 890c86e..df79e73 100644 (file)
@@ -59,9 +59,6 @@ static int __init probe_list2(int unit, struct devprobe2 *p, int autoprobe)
  * look for EISA/PCI cards in addition to ISA cards).
  */
 static struct devprobe2 isa_probes[] __initdata = {
-#if defined(CONFIG_HP100) && defined(CONFIG_ISA)       /* ISA, EISA */
-       {hp100_probe, 0},
-#endif
 #ifdef CONFIG_3C515
        {tc515_probe, 0},
 #endif
index 644ef9a..c2ef740 100644 (file)
@@ -11,7 +11,7 @@ fsl-dpaa2-eth-objs    := dpaa2-eth.o dpaa2-ethtool.o dpni.o dpaa2-mac.o dpmac.o dpa
 fsl-dpaa2-eth-${CONFIG_FSL_DPAA2_ETH_DCB} += dpaa2-eth-dcb.o
 fsl-dpaa2-eth-${CONFIG_DEBUG_FS} += dpaa2-eth-debugfs.o
 fsl-dpaa2-ptp-objs     := dpaa2-ptp.o dprtc.o
-fsl-dpaa2-switch-objs  := dpaa2-switch.o dpaa2-switch-ethtool.o dpsw.o
+fsl-dpaa2-switch-objs  := dpaa2-switch.o dpaa2-switch-ethtool.o dpsw.o dpaa2-switch-flower.o
 
 # Needed by the tracing framework
 CFLAGS_dpaa2-eth.o := -I$(src)
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c
new file mode 100644 (file)
index 0000000..f9451ec
--- /dev/null
@@ -0,0 +1,492 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * DPAA2 Ethernet Switch flower support
+ *
+ * Copyright 2021 NXP
+ *
+ */
+
+#include "dpaa2-switch.h"
+
+static int dpaa2_switch_flower_parse_key(struct flow_cls_offload *cls,
+                                        struct dpsw_acl_key *acl_key)
+{
+       struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
+       struct flow_dissector *dissector = rule->match.dissector;
+       struct netlink_ext_ack *extack = cls->common.extack;
+       struct dpsw_acl_fields *acl_h, *acl_m;
+
+       if (dissector->used_keys &
+           ~(BIT(FLOW_DISSECTOR_KEY_BASIC) |
+             BIT(FLOW_DISSECTOR_KEY_CONTROL) |
+             BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
+             BIT(FLOW_DISSECTOR_KEY_VLAN) |
+             BIT(FLOW_DISSECTOR_KEY_PORTS) |
+             BIT(FLOW_DISSECTOR_KEY_IP) |
+             BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
+             BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS))) {
+               NL_SET_ERR_MSG_MOD(extack,
+                                  "Unsupported keys used");
+               return -EOPNOTSUPP;
+       }
+
+       acl_h = &acl_key->match;
+       acl_m = &acl_key->mask;
+
+       if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
+               struct flow_match_basic match;
+
+               flow_rule_match_basic(rule, &match);
+               acl_h->l3_protocol = match.key->ip_proto;
+               acl_h->l2_ether_type = be16_to_cpu(match.key->n_proto);
+               acl_m->l3_protocol = match.mask->ip_proto;
+               acl_m->l2_ether_type = be16_to_cpu(match.mask->n_proto);
+       }
+
+       if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+               struct flow_match_eth_addrs match;
+
+               flow_rule_match_eth_addrs(rule, &match);
+               ether_addr_copy(acl_h->l2_dest_mac, &match.key->dst[0]);
+               ether_addr_copy(acl_h->l2_source_mac, &match.key->src[0]);
+               ether_addr_copy(acl_m->l2_dest_mac, &match.mask->dst[0]);
+               ether_addr_copy(acl_m->l2_source_mac, &match.mask->src[0]);
+       }
+
+       if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
+               struct flow_match_vlan match;
+
+               flow_rule_match_vlan(rule, &match);
+               acl_h->l2_vlan_id = match.key->vlan_id;
+               acl_h->l2_tpid = be16_to_cpu(match.key->vlan_tpid);
+               acl_h->l2_pcp_dei = match.key->vlan_priority << 1 |
+                                   match.key->vlan_dei;
+
+               acl_m->l2_vlan_id = match.mask->vlan_id;
+               acl_m->l2_tpid = be16_to_cpu(match.mask->vlan_tpid);
+               acl_m->l2_pcp_dei = match.mask->vlan_priority << 1 |
+                                   match.mask->vlan_dei;
+       }
+
+       if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
+               struct flow_match_ipv4_addrs match;
+
+               flow_rule_match_ipv4_addrs(rule, &match);
+               acl_h->l3_source_ip = be32_to_cpu(match.key->src);
+               acl_h->l3_dest_ip = be32_to_cpu(match.key->dst);
+               acl_m->l3_source_ip = be32_to_cpu(match.mask->src);
+               acl_m->l3_dest_ip = be32_to_cpu(match.mask->dst);
+       }
+
+       if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
+               struct flow_match_ports match;
+
+               flow_rule_match_ports(rule, &match);
+               acl_h->l4_source_port = be16_to_cpu(match.key->src);
+               acl_h->l4_dest_port = be16_to_cpu(match.key->dst);
+               acl_m->l4_source_port = be16_to_cpu(match.mask->src);
+               acl_m->l4_dest_port = be16_to_cpu(match.mask->dst);
+       }
+
+       if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
+               struct flow_match_ip match;
+
+               flow_rule_match_ip(rule, &match);
+               if (match.mask->ttl != 0) {
+                       NL_SET_ERR_MSG_MOD(extack,
+                                          "Matching on TTL not supported");
+                       return -EOPNOTSUPP;
+               }
+
+               if ((match.mask->tos & 0x3) != 0) {
+                       NL_SET_ERR_MSG_MOD(extack,
+                                          "Matching on ECN not supported, only DSCP");
+                       return -EOPNOTSUPP;
+               }
+
+               acl_h->l3_dscp = match.key->tos >> 2;
+               acl_m->l3_dscp = match.mask->tos >> 2;
+       }
+
+       return 0;
+}
+
+int dpaa2_switch_acl_entry_add(struct dpaa2_switch_acl_tbl *acl_tbl,
+                              struct dpaa2_switch_acl_entry *entry)
+{
+       struct dpsw_acl_entry_cfg *acl_entry_cfg = &entry->cfg;
+       struct ethsw_core *ethsw = acl_tbl->ethsw;
+       struct dpsw_acl_key *acl_key = &entry->key;
+       struct device *dev = ethsw->dev;
+       u8 *cmd_buff;
+       int err;
+
+       cmd_buff = kzalloc(DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE, GFP_KERNEL);
+       if (!cmd_buff)
+               return -ENOMEM;
+
+       dpsw_acl_prepare_entry_cfg(acl_key, cmd_buff);
+
+       acl_entry_cfg->key_iova = dma_map_single(dev, cmd_buff,
+                                                DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE,
+                                                DMA_TO_DEVICE);
+       if (unlikely(dma_mapping_error(dev, acl_entry_cfg->key_iova))) {
+               dev_err(dev, "DMA mapping failed\n");
+               kfree(cmd_buff);
+               return -EFAULT;
+       }
+
+       err = dpsw_acl_add_entry(ethsw->mc_io, 0, ethsw->dpsw_handle,
+                                acl_tbl->id, acl_entry_cfg);
+
+       dma_unmap_single(dev, acl_entry_cfg->key_iova,
+                        DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE, DMA_TO_DEVICE);
+       kfree(cmd_buff);
+       if (err) {
+               dev_err(dev, "dpsw_acl_add_entry() failed %d\n", err);
+               return err;
+       }
+
+       return 0;
+}
+
+static int dpaa2_switch_acl_entry_remove(struct dpaa2_switch_acl_tbl *acl_tbl,
+                                        struct dpaa2_switch_acl_entry *entry)
+{
+       struct dpsw_acl_entry_cfg *acl_entry_cfg = &entry->cfg;
+       struct dpsw_acl_key *acl_key = &entry->key;
+       struct ethsw_core *ethsw = acl_tbl->ethsw;
+       struct device *dev = ethsw->dev;
+       u8 *cmd_buff;
+       int err;
+
+       cmd_buff = kzalloc(DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE, GFP_KERNEL);
+       if (!cmd_buff)
+               return -ENOMEM;
+
+       dpsw_acl_prepare_entry_cfg(acl_key, cmd_buff);
+
+       acl_entry_cfg->key_iova = dma_map_single(dev, cmd_buff,
+                                                DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE,
+                                                DMA_TO_DEVICE);
+       if (unlikely(dma_mapping_error(dev, acl_entry_cfg->key_iova))) {
+               dev_err(dev, "DMA mapping failed\n");
+               kfree(cmd_buff);
+               return -EFAULT;
+       }
+
+       err = dpsw_acl_remove_entry(ethsw->mc_io, 0, ethsw->dpsw_handle,
+                                   acl_tbl->id, acl_entry_cfg);
+
+       dma_unmap_single(dev, acl_entry_cfg->key_iova,
+                        DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE, DMA_TO_DEVICE);
+       kfree(cmd_buff);
+       if (err) {
+               dev_err(dev, "dpsw_acl_remove_entry() failed %d\n", err);
+               return err;
+       }
+
+       return 0;
+}
+
+static int
+dpaa2_switch_acl_entry_add_to_list(struct dpaa2_switch_acl_tbl *acl_tbl,
+                                  struct dpaa2_switch_acl_entry *entry)
+{
+       struct dpaa2_switch_acl_entry *tmp;
+       struct list_head *pos, *n;
+       int index = 0;
+
+       if (list_empty(&acl_tbl->entries)) {
+               list_add(&entry->list, &acl_tbl->entries);
+               return index;
+       }
+
+       list_for_each_safe(pos, n, &acl_tbl->entries) {
+               tmp = list_entry(pos, struct dpaa2_switch_acl_entry, list);
+               if (entry->prio < tmp->prio)
+                       break;
+               index++;
+       }
+       list_add(&entry->list, pos->prev);
+       return index;
+}
+
+static struct dpaa2_switch_acl_entry *
+dpaa2_switch_acl_entry_get_by_index(struct dpaa2_switch_acl_tbl *acl_tbl,
+                                   int index)
+{
+       struct dpaa2_switch_acl_entry *tmp;
+       int i = 0;
+
+       list_for_each_entry(tmp, &acl_tbl->entries, list) {
+               if (i == index)
+                       return tmp;
+               ++i;
+       }
+
+       return NULL;
+}
+
+static int
+dpaa2_switch_acl_entry_set_precedence(struct dpaa2_switch_acl_tbl *acl_tbl,
+                                     struct dpaa2_switch_acl_entry *entry,
+                                     int precedence)
+{
+       int err;
+
+       err = dpaa2_switch_acl_entry_remove(acl_tbl, entry);
+       if (err)
+               return err;
+
+       entry->cfg.precedence = precedence;
+       return dpaa2_switch_acl_entry_add(acl_tbl, entry);
+}
+
+static int dpaa2_switch_acl_tbl_add_entry(struct dpaa2_switch_acl_tbl *acl_tbl,
+                                         struct dpaa2_switch_acl_entry *entry)
+{
+       struct dpaa2_switch_acl_entry *tmp;
+       int index, i, precedence, err;
+
+       /* Add the new ACL entry to the linked list and get its index */
+       index = dpaa2_switch_acl_entry_add_to_list(acl_tbl, entry);
+
+       /* Move the existing ACL entries up in priority to make room
+        * for the new filter.
+        */
+       precedence = DPAA2_ETHSW_PORT_MAX_ACL_ENTRIES - acl_tbl->num_rules - 1;
+       for (i = 0; i < index; i++) {
+               tmp = dpaa2_switch_acl_entry_get_by_index(acl_tbl, i);
+
+               err = dpaa2_switch_acl_entry_set_precedence(acl_tbl, tmp,
+                                                           precedence);
+               if (err)
+                       return err;
+
+               precedence++;
+       }
+
+       /* Add the new entry to hardware */
+       entry->cfg.precedence = precedence;
+       err = dpaa2_switch_acl_entry_add(acl_tbl, entry);
+       acl_tbl->num_rules++;
+
+       return err;
+}
+
+static struct dpaa2_switch_acl_entry *
+dpaa2_switch_acl_tbl_find_entry_by_cookie(struct dpaa2_switch_acl_tbl *acl_tbl,
+                                         unsigned long cookie)
+{
+       struct dpaa2_switch_acl_entry *tmp, *n;
+
+       list_for_each_entry_safe(tmp, n, &acl_tbl->entries, list) {
+               if (tmp->cookie == cookie)
+                       return tmp;
+       }
+       return NULL;
+}
+
+static int
+dpaa2_switch_acl_entry_get_index(struct dpaa2_switch_acl_tbl *acl_tbl,
+                                struct dpaa2_switch_acl_entry *entry)
+{
+       struct dpaa2_switch_acl_entry *tmp, *n;
+       int index = 0;
+
+       list_for_each_entry_safe(tmp, n, &acl_tbl->entries, list) {
+               if (tmp->cookie == entry->cookie)
+                       return index;
+               index++;
+       }
+       return -ENOENT;
+}
+
+static int
+dpaa2_switch_acl_tbl_remove_entry(struct dpaa2_switch_acl_tbl *acl_tbl,
+                                 struct dpaa2_switch_acl_entry *entry)
+{
+       struct dpaa2_switch_acl_entry *tmp;
+       int index, i, precedence, err;
+
+       index = dpaa2_switch_acl_entry_get_index(acl_tbl, entry);
+
+       /* Remove the ACL entry from the hardware */
+       err = dpaa2_switch_acl_entry_remove(acl_tbl, entry);
+       if (err)
+               return err;
+
+       acl_tbl->num_rules--;
+
+       /* Remove it from the list also */
+       list_del(&entry->list);
+
+       /* Move the entries above the deleted one down in priority */
+       precedence = entry->cfg.precedence;
+       for (i = index - 1; i >= 0; i--) {
+               tmp = dpaa2_switch_acl_entry_get_by_index(acl_tbl, i);
+               err = dpaa2_switch_acl_entry_set_precedence(acl_tbl, tmp,
+                                                           precedence);
+               if (err)
+                       return err;
+
+               precedence--;
+       }
+
+       kfree(entry);
+
+       return 0;
+}
+
+static int dpaa2_switch_tc_parse_action(struct ethsw_core *ethsw,
+                                       struct flow_action_entry *cls_act,
+                                       struct dpsw_acl_result *dpsw_act,
+                                       struct netlink_ext_ack *extack)
+{
+       switch (cls_act->id) {
+       case FLOW_ACTION_TRAP:
+               dpsw_act->action = DPSW_ACL_ACTION_REDIRECT_TO_CTRL_IF;
+               break;
+       case FLOW_ACTION_REDIRECT:
+               if (!dpaa2_switch_port_dev_check(cls_act->dev)) {
+                       NL_SET_ERR_MSG_MOD(extack,
+                                          "Destination not a DPAA2 switch port");
+                       return -EOPNOTSUPP;
+               }
+
+               dpsw_act->if_id = dpaa2_switch_get_index(ethsw, cls_act->dev);
+               dpsw_act->action = DPSW_ACL_ACTION_REDIRECT;
+               break;
+       case FLOW_ACTION_DROP:
+               dpsw_act->action = DPSW_ACL_ACTION_DROP;
+               break;
+       default:
+               NL_SET_ERR_MSG_MOD(extack, "Action not supported");
+               return -EOPNOTSUPP;
+       }
+
+       return 0;
+}
+
+int dpaa2_switch_cls_flower_replace(struct dpaa2_switch_acl_tbl *acl_tbl,
+                                   struct flow_cls_offload *cls)
+{
+       struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
+       struct netlink_ext_ack *extack = cls->common.extack;
+       struct ethsw_core *ethsw = acl_tbl->ethsw;
+       struct dpaa2_switch_acl_entry *acl_entry;
+       struct flow_action_entry *act;
+       int err;
+
+       if (!flow_offload_has_one_action(&rule->action)) {
+               NL_SET_ERR_MSG(extack, "Only singular actions are supported");
+               return -EOPNOTSUPP;
+       }
+
+       if (dpaa2_switch_acl_tbl_is_full(acl_tbl)) {
+               NL_SET_ERR_MSG(extack, "Maximum filter capacity reached");
+               return -ENOMEM;
+       }
+
+       acl_entry = kzalloc(sizeof(*acl_entry), GFP_KERNEL);
+       if (!acl_entry)
+               return -ENOMEM;
+
+       err = dpaa2_switch_flower_parse_key(cls, &acl_entry->key);
+       if (err)
+               goto free_acl_entry;
+
+       act = &rule->action.entries[0];
+       err = dpaa2_switch_tc_parse_action(ethsw, act,
+                                          &acl_entry->cfg.result, extack);
+       if (err)
+               goto free_acl_entry;
+
+       acl_entry->prio = cls->common.prio;
+       acl_entry->cookie = cls->cookie;
+
+       err = dpaa2_switch_acl_tbl_add_entry(acl_tbl, acl_entry);
+       if (err)
+               goto free_acl_entry;
+
+       return 0;
+
+free_acl_entry:
+       kfree(acl_entry);
+
+       return err;
+}
+
+int dpaa2_switch_cls_flower_destroy(struct dpaa2_switch_acl_tbl *acl_tbl,
+                                   struct flow_cls_offload *cls)
+{
+       struct dpaa2_switch_acl_entry *entry;
+
+       entry = dpaa2_switch_acl_tbl_find_entry_by_cookie(acl_tbl, cls->cookie);
+       if (!entry)
+               return 0;
+
+       return dpaa2_switch_acl_tbl_remove_entry(acl_tbl, entry);
+}
+
+int dpaa2_switch_cls_matchall_replace(struct dpaa2_switch_acl_tbl *acl_tbl,
+                                     struct tc_cls_matchall_offload *cls)
+{
+       struct netlink_ext_ack *extack = cls->common.extack;
+       struct ethsw_core *ethsw = acl_tbl->ethsw;
+       struct dpaa2_switch_acl_entry *acl_entry;
+       struct flow_action_entry *act;
+       int err;
+
+       if (!flow_offload_has_one_action(&cls->rule->action)) {
+               NL_SET_ERR_MSG(extack, "Only singular actions are supported");
+               return -EOPNOTSUPP;
+       }
+
+       if (dpaa2_switch_acl_tbl_is_full(acl_tbl)) {
+               NL_SET_ERR_MSG(extack, "Maximum filter capacity reached");
+               return -ENOMEM;
+       }
+
+       acl_entry = kzalloc(sizeof(*acl_entry), GFP_KERNEL);
+       if (!acl_entry)
+               return -ENOMEM;
+
+       act = &cls->rule->action.entries[0];
+       err = dpaa2_switch_tc_parse_action(ethsw, act,
+                                          &acl_entry->cfg.result, extack);
+       if (err)
+               goto free_acl_entry;
+
+       acl_entry->prio = cls->common.prio;
+       acl_entry->cookie = cls->cookie;
+
+       err = dpaa2_switch_acl_tbl_add_entry(acl_tbl, acl_entry);
+       if (err)
+               goto free_acl_entry;
+
+       return 0;
+
+free_acl_entry:
+       kfree(acl_entry);
+
+       return err;
+}
+
+int dpaa2_switch_cls_matchall_destroy(struct dpaa2_switch_acl_tbl *acl_tbl,
+                                     struct tc_cls_matchall_offload *cls)
+{
+       struct dpaa2_switch_acl_entry *entry;
+
+       entry = dpaa2_switch_acl_tbl_find_entry_by_cookie(acl_tbl, cls->cookie);
+       if (!entry)
+               return 0;
+
+       return dpaa2_switch_acl_tbl_remove_entry(acl_tbl, entry);
+}
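
The flower and matchall entry points above can be exercised from user space with tc; a hedged sketch follows (the port name ethsw_p0 is hypothetical, and skip_sw forces the rules into the hardware ACL table rather than software):

    # attach a clsact qdisc to a DPAA2 switch port (hypothetical name)
    tc qdisc add dev ethsw_p0 clsact
    # flower: drop IPv4/UDP traffic to port 319, parsed by dpaa2_switch_flower_parse_key()
    tc filter add dev ethsw_p0 ingress protocol ip flower skip_sw \
            ip_proto udp dst_port 319 action drop
    # matchall: trap everything else to the CPU at a lower priority
    tc filter add dev ethsw_p0 ingress prio 10 matchall skip_sw action trap
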
index 80efc81..5250d51 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/kthread.h>
 #include <linux/workqueue.h>
 #include <linux/iommu.h>
+#include <net/pkt_cls.h>
 
 #include <linux/fsl/mc.h>
 
@@ -40,6 +41,17 @@ static struct dpaa2_switch_fdb *dpaa2_switch_fdb_get_unused(struct ethsw_core *e
        return NULL;
 }
 
+static struct dpaa2_switch_acl_tbl *
+dpaa2_switch_acl_tbl_get_unused(struct ethsw_core *ethsw)
+{
+       int i;
+
+       for (i = 0; i < ethsw->sw_attr.num_ifs; i++)
+               if (!ethsw->acls[i].in_use)
+                       return &ethsw->acls[i];
+       return NULL;
+}
+
 static u16 dpaa2_switch_port_set_fdb(struct ethsw_port_priv *port_priv,
                                     struct net_device *bridge_dev)
 {
@@ -1114,6 +1126,259 @@ err_exit:
        return NETDEV_TX_OK;
 }
 
+static int
+dpaa2_switch_setup_tc_cls_flower(struct dpaa2_switch_acl_tbl *acl_tbl,
+                                struct flow_cls_offload *f)
+{
+       switch (f->command) {
+       case FLOW_CLS_REPLACE:
+               return dpaa2_switch_cls_flower_replace(acl_tbl, f);
+       case FLOW_CLS_DESTROY:
+               return dpaa2_switch_cls_flower_destroy(acl_tbl, f);
+       default:
+               return -EOPNOTSUPP;
+       }
+}
+
+static int
+dpaa2_switch_setup_tc_cls_matchall(struct dpaa2_switch_acl_tbl *acl_tbl,
+                                  struct tc_cls_matchall_offload *f)
+{
+       switch (f->command) {
+       case TC_CLSMATCHALL_REPLACE:
+               return dpaa2_switch_cls_matchall_replace(acl_tbl, f);
+       case TC_CLSMATCHALL_DESTROY:
+               return dpaa2_switch_cls_matchall_destroy(acl_tbl, f);
+       default:
+               return -EOPNOTSUPP;
+       }
+}
+
+static int dpaa2_switch_port_setup_tc_block_cb_ig(enum tc_setup_type type,
+                                                 void *type_data,
+                                                 void *cb_priv)
+{
+       switch (type) {
+       case TC_SETUP_CLSFLOWER:
+               return dpaa2_switch_setup_tc_cls_flower(cb_priv, type_data);
+       case TC_SETUP_CLSMATCHALL:
+               return dpaa2_switch_setup_tc_cls_matchall(cb_priv, type_data);
+       default:
+               return -EOPNOTSUPP;
+       }
+}
+
+static LIST_HEAD(dpaa2_switch_block_cb_list);
+
+static int dpaa2_switch_port_acl_tbl_bind(struct ethsw_port_priv *port_priv,
+                                         struct dpaa2_switch_acl_tbl *acl_tbl)
+{
+       struct ethsw_core *ethsw = port_priv->ethsw_data;
+       struct net_device *netdev = port_priv->netdev;
+       struct dpsw_acl_if_cfg acl_if_cfg;
+       int err;
+
+       if (port_priv->acl_tbl)
+               return -EINVAL;
+
+       acl_if_cfg.if_id[0] = port_priv->idx;
+       acl_if_cfg.num_ifs = 1;
+       err = dpsw_acl_add_if(ethsw->mc_io, 0, ethsw->dpsw_handle,
+                             acl_tbl->id, &acl_if_cfg);
+       if (err) {
+               netdev_err(netdev, "dpsw_acl_add_if err %d\n", err);
+               return err;
+       }
+
+       acl_tbl->ports |= BIT(port_priv->idx);
+       port_priv->acl_tbl = acl_tbl;
+
+       return 0;
+}
+
+static int
+dpaa2_switch_port_acl_tbl_unbind(struct ethsw_port_priv *port_priv,
+                                struct dpaa2_switch_acl_tbl *acl_tbl)
+{
+       struct ethsw_core *ethsw = port_priv->ethsw_data;
+       struct net_device *netdev = port_priv->netdev;
+       struct dpsw_acl_if_cfg acl_if_cfg;
+       int err;
+
+       if (port_priv->acl_tbl != acl_tbl)
+               return -EINVAL;
+
+       acl_if_cfg.if_id[0] = port_priv->idx;
+       acl_if_cfg.num_ifs = 1;
+       err = dpsw_acl_remove_if(ethsw->mc_io, 0, ethsw->dpsw_handle,
+                                acl_tbl->id, &acl_if_cfg);
+       if (err) {
+               netdev_err(netdev, "dpsw_acl_add_if err %d\n", err);
+               return err;
+       }
+
+       acl_tbl->ports &= ~BIT(port_priv->idx);
+       port_priv->acl_tbl = NULL;
+       return 0;
+}
+
+static int dpaa2_switch_port_block_bind(struct ethsw_port_priv *port_priv,
+                                       struct dpaa2_switch_acl_tbl *acl_tbl)
+{
+       struct dpaa2_switch_acl_tbl *old_acl_tbl = port_priv->acl_tbl;
+       int err;
+
+       /* If the port is already bound to this ACL table then do nothing. This
+        * can happen when this port is the first one to join a tc block
+        */
+       if (port_priv->acl_tbl == acl_tbl)
+               return 0;
+
+       err = dpaa2_switch_port_acl_tbl_unbind(port_priv, old_acl_tbl);
+       if (err)
+               return err;
+
+       /* Mark the previous ACL table as being unused if this was the last
+        * port that was using it.
+        */
+       if (old_acl_tbl->ports == 0)
+               old_acl_tbl->in_use = false;
+
+       return dpaa2_switch_port_acl_tbl_bind(port_priv, acl_tbl);
+}
+
+static int dpaa2_switch_port_block_unbind(struct ethsw_port_priv *port_priv,
+                                         struct dpaa2_switch_acl_tbl *acl_tbl)
+{
+       struct ethsw_core *ethsw = port_priv->ethsw_data;
+       struct dpaa2_switch_acl_tbl *new_acl_tbl;
+       int err;
+
+       /* If we are the only port still bound to this block (ACL table),
+        * keep using it as our private table.
+        */
+       if (acl_tbl->ports == BIT(port_priv->idx))
+               return 0;
+
+       err = dpaa2_switch_port_acl_tbl_unbind(port_priv, acl_tbl);
+       if (err)
+               return err;
+
+       if (acl_tbl->ports == 0)
+               acl_tbl->in_use = false;
+
+       new_acl_tbl = dpaa2_switch_acl_tbl_get_unused(ethsw);
+       new_acl_tbl->in_use = true;
+       return dpaa2_switch_port_acl_tbl_bind(port_priv, new_acl_tbl);
+}
+
+static int dpaa2_switch_setup_tc_block_bind(struct net_device *netdev,
+                                           struct flow_block_offload *f)
+{
+       struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+       struct ethsw_core *ethsw = port_priv->ethsw_data;
+       struct dpaa2_switch_acl_tbl *acl_tbl;
+       struct flow_block_cb *block_cb;
+       bool register_block = false;
+       int err;
+
+       block_cb = flow_block_cb_lookup(f->block,
+                                       dpaa2_switch_port_setup_tc_block_cb_ig,
+                                       ethsw);
+
+       if (!block_cb) {
+               /* If the ACL table is not already known, then this port must
+                * be the first to join it. In this case, we can just continue
+                * to use our private table
+                */
+               acl_tbl = port_priv->acl_tbl;
+
+               block_cb = flow_block_cb_alloc(dpaa2_switch_port_setup_tc_block_cb_ig,
+                                              ethsw, acl_tbl, NULL);
+               if (IS_ERR(block_cb))
+                       return PTR_ERR(block_cb);
+
+               register_block = true;
+       } else {
+               acl_tbl = flow_block_cb_priv(block_cb);
+       }
+
+       flow_block_cb_incref(block_cb);
+       err = dpaa2_switch_port_block_bind(port_priv, acl_tbl);
+       if (err)
+               goto err_block_bind;
+
+       if (register_block) {
+               flow_block_cb_add(block_cb, f);
+               list_add_tail(&block_cb->driver_list,
+                             &dpaa2_switch_block_cb_list);
+       }
+
+       return 0;
+
+err_block_bind:
+       if (!flow_block_cb_decref(block_cb))
+               flow_block_cb_free(block_cb);
+       return err;
+}
+
+static void dpaa2_switch_setup_tc_block_unbind(struct net_device *netdev,
+                                              struct flow_block_offload *f)
+{
+       struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+       struct ethsw_core *ethsw = port_priv->ethsw_data;
+       struct dpaa2_switch_acl_tbl *acl_tbl;
+       struct flow_block_cb *block_cb;
+       int err;
+
+       block_cb = flow_block_cb_lookup(f->block,
+                                       dpaa2_switch_port_setup_tc_block_cb_ig,
+                                       ethsw);
+       if (!block_cb)
+               return;
+
+       acl_tbl = flow_block_cb_priv(block_cb);
+       err = dpaa2_switch_port_block_unbind(port_priv, acl_tbl);
+       if (!err && !flow_block_cb_decref(block_cb)) {
+               flow_block_cb_remove(block_cb, f);
+               list_del(&block_cb->driver_list);
+       }
+}
+
+static int dpaa2_switch_setup_tc_block(struct net_device *netdev,
+                                      struct flow_block_offload *f)
+{
+       if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
+               return -EOPNOTSUPP;
+
+       f->driver_block_list = &dpaa2_switch_block_cb_list;
+
+       switch (f->command) {
+       case FLOW_BLOCK_BIND:
+               return dpaa2_switch_setup_tc_block_bind(netdev, f);
+       case FLOW_BLOCK_UNBIND:
+               dpaa2_switch_setup_tc_block_unbind(netdev, f);
+               return 0;
+       default:
+               return -EOPNOTSUPP;
+       }
+}
+
+static int dpaa2_switch_port_setup_tc(struct net_device *netdev,
+                                     enum tc_setup_type type,
+                                     void *type_data)
+{
+       switch (type) {
+       case TC_SETUP_BLOCK:
+               return dpaa2_switch_setup_tc_block(netdev, type_data);
+       default:
+               return -EOPNOTSUPP;
+       }
+}
+
 static const struct net_device_ops dpaa2_switch_port_ops = {
        .ndo_open               = dpaa2_switch_port_open,
        .ndo_stop               = dpaa2_switch_port_stop,
@@ -1130,6 +1395,7 @@ static const struct net_device_ops dpaa2_switch_port_ops = {
        .ndo_start_xmit         = dpaa2_switch_port_tx,
        .ndo_get_port_parent_id = dpaa2_switch_port_parent_id,
        .ndo_get_phys_port_name = dpaa2_switch_port_get_phys_name,
+       .ndo_setup_tc           = dpaa2_switch_port_setup_tc,
 };
 
 bool dpaa2_switch_port_dev_check(const struct net_device *netdev)
@@ -2676,61 +2942,17 @@ err_close:
 static int dpaa2_switch_port_trap_mac_addr(struct ethsw_port_priv *port_priv,
                                           const char *mac)
 {
-       struct net_device *netdev = port_priv->netdev;
-       struct dpsw_acl_entry_cfg acl_entry_cfg;
-       struct dpsw_acl_fields *acl_h;
-       struct dpsw_acl_fields *acl_m;
-       struct dpsw_acl_key acl_key;
-       struct device *dev;
-       u8 *cmd_buff;
-       int err;
-
-       dev = port_priv->netdev->dev.parent;
-       acl_h = &acl_key.match;
-       acl_m = &acl_key.mask;
-
-       if (port_priv->acl_num_rules >= DPAA2_ETHSW_PORT_MAX_ACL_ENTRIES) {
-               netdev_err(netdev, "ACL full\n");
-               return -ENOMEM;
-       }
-
-       memset(&acl_entry_cfg, 0, sizeof(acl_entry_cfg));
-       memset(&acl_key, 0, sizeof(acl_key));
+       struct dpaa2_switch_acl_entry acl_entry = {0};
 
        /* Match on the destination MAC address */
-       ether_addr_copy(acl_h->l2_dest_mac, mac);
-       eth_broadcast_addr(acl_m->l2_dest_mac);
+       ether_addr_copy(acl_entry.key.match.l2_dest_mac, mac);
+       eth_broadcast_addr(acl_entry.key.mask.l2_dest_mac);
 
-       cmd_buff = kzalloc(DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE, GFP_KERNEL);
-       if (!cmd_buff)
-               return -ENOMEM;
-       dpsw_acl_prepare_entry_cfg(&acl_key, cmd_buff);
-
-       memset(&acl_entry_cfg, 0, sizeof(acl_entry_cfg));
-       acl_entry_cfg.precedence = port_priv->acl_num_rules;
-       acl_entry_cfg.result.action = DPSW_ACL_ACTION_REDIRECT_TO_CTRL_IF;
-       acl_entry_cfg.key_iova = dma_map_single(dev, cmd_buff,
-                                               DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE,
-                                               DMA_TO_DEVICE);
-       if (unlikely(dma_mapping_error(dev, acl_entry_cfg.key_iova))) {
-               netdev_err(netdev, "DMA mapping failed\n");
-               return -EFAULT;
-       }
+       /* Trap to CPU */
+       acl_entry.cfg.precedence = 0;
+       acl_entry.cfg.result.action = DPSW_ACL_ACTION_REDIRECT_TO_CTRL_IF;
 
-       err = dpsw_acl_add_entry(port_priv->ethsw_data->mc_io, 0,
-                                port_priv->ethsw_data->dpsw_handle,
-                                port_priv->acl_tbl, &acl_entry_cfg);
-
-       dma_unmap_single(dev, acl_entry_cfg.key_iova, sizeof(cmd_buff),
-                        DMA_TO_DEVICE);
-       if (err) {
-               netdev_err(netdev, "dpsw_acl_add_entry() failed %d\n", err);
-               return err;
-       }
-
-       port_priv->acl_num_rules++;
-
-       return 0;
+       return dpaa2_switch_acl_entry_add(port_priv->acl_tbl, &acl_entry);
 }
 
 static int dpaa2_switch_port_init(struct ethsw_port_priv *port_priv, u16 port)
@@ -2743,12 +2965,12 @@ static int dpaa2_switch_port_init(struct ethsw_port_priv *port_priv, u16 port)
        };
        struct net_device *netdev = port_priv->netdev;
        struct ethsw_core *ethsw = port_priv->ethsw_data;
+       struct dpaa2_switch_acl_tbl *acl_tbl;
        struct dpsw_fdb_cfg fdb_cfg = {0};
-       struct dpsw_acl_if_cfg acl_if_cfg;
        struct dpsw_if_attr dpsw_if_attr;
        struct dpaa2_switch_fdb *fdb;
        struct dpsw_acl_cfg acl_cfg;
-       u16 fdb_id;
+       u16 fdb_id, acl_tbl_id;
        int err;
 
        /* Get the Tx queue for this specific port */
@@ -2792,21 +3014,22 @@ static int dpaa2_switch_port_init(struct ethsw_port_priv *port_priv, u16 port)
        /* Create an ACL table to be used by this switch port */
        acl_cfg.max_entries = DPAA2_ETHSW_PORT_MAX_ACL_ENTRIES;
        err = dpsw_acl_add(ethsw->mc_io, 0, ethsw->dpsw_handle,
-                          &port_priv->acl_tbl, &acl_cfg);
+                          &acl_tbl_id, &acl_cfg);
        if (err) {
                netdev_err(netdev, "dpsw_acl_add err %d\n", err);
                return err;
        }
 
-       acl_if_cfg.if_id[0] = port_priv->idx;
-       acl_if_cfg.num_ifs = 1;
-       err = dpsw_acl_add_if(ethsw->mc_io, 0, ethsw->dpsw_handle,
-                             port_priv->acl_tbl, &acl_if_cfg);
-       if (err) {
-               netdev_err(netdev, "dpsw_acl_add_if err %d\n", err);
-               dpsw_acl_remove(ethsw->mc_io, 0, ethsw->dpsw_handle,
-                               port_priv->acl_tbl);
-       }
+       acl_tbl = dpaa2_switch_acl_tbl_get_unused(ethsw);
+       acl_tbl->ethsw = ethsw;
+       acl_tbl->id = acl_tbl_id;
+       acl_tbl->in_use = true;
+       acl_tbl->num_rules = 0;
+       INIT_LIST_HEAD(&acl_tbl->entries);
+
+       err = dpaa2_switch_port_acl_tbl_bind(port_priv, acl_tbl);
+       if (err)
+               return err;
 
        err = dpaa2_switch_port_trap_mac_addr(port_priv, stpa);
        if (err)
@@ -2858,6 +3081,7 @@ static int dpaa2_switch_remove(struct fsl_mc_device *sw_dev)
        }
 
        kfree(ethsw->fdbs);
+       kfree(ethsw->acls);
        kfree(ethsw->ports);
 
        dpaa2_switch_takedown(sw_dev);
@@ -2915,7 +3139,9 @@ static int dpaa2_switch_probe_port(struct ethsw_core *ethsw,
        /* The DPAA2 switch's ingress path depends on the VLAN table,
         * thus we are not able to disable VLAN filtering.
         */
-       port_netdev->features = NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER;
+       port_netdev->features = NETIF_F_HW_VLAN_CTAG_FILTER |
+                               NETIF_F_HW_VLAN_STAG_FILTER |
+                               NETIF_F_HW_TC;
 
        err = dpaa2_switch_port_init(port_priv, port_idx);
        if (err)
@@ -2983,6 +3209,13 @@ static int dpaa2_switch_probe(struct fsl_mc_device *sw_dev)
                goto err_free_ports;
        }
 
+       ethsw->acls = kcalloc(ethsw->sw_attr.num_ifs, sizeof(*ethsw->acls),
+                             GFP_KERNEL);
+       if (!ethsw->acls) {
+               err = -ENOMEM;
+               goto err_free_fdbs;
+       }
+
        for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
                err = dpaa2_switch_probe_port(ethsw, i);
                if (err)
@@ -3031,6 +3264,8 @@ err_stop:
 err_free_netdev:
        for (i--; i >= 0; i--)
                free_netdev(ethsw->ports[i]->netdev);
+       kfree(ethsw->acls);
+err_free_fdbs:
        kfree(ethsw->fdbs);
 err_free_ports:
        kfree(ethsw->ports);
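
Because rules now live in shared ACL tables rather than per-port state, ports that join the same tc shared block are migrated onto one table by the bind/unbind helpers above, so a single filter covers all member ports. A sketch, with hypothetical port names:

    tc qdisc add dev ethsw_p0 ingress_block 1 clsact
    tc qdisc add dev ethsw_p1 ingress_block 1 clsact
    # one rule, offloaded once to the shared ACL table, matches on both ports
    tc filter add block 1 flower skip_sw dst_mac 01:80:c2:00:00:0e action trap
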
index 0ae1d27..bdef71f 100644 (file)
@@ -18,6 +18,7 @@
 #include <net/switchdev.h>
 #include <linux/if_bridge.h>
 #include <linux/fsl/mc.h>
+#include <net/pkt_cls.h>
 #include <soc/fsl/dpaa2-io.h>
 
 #include "dpsw.h"
@@ -80,6 +81,8 @@
        (DPAA2_SWITCH_TX_DATA_OFFSET + DPAA2_SWITCH_TX_BUF_ALIGN)
 
 #define DPAA2_ETHSW_PORT_MAX_ACL_ENTRIES       16
+#define DPAA2_ETHSW_PORT_DEFAULT_TRAPS         1
+
 #define DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE      256
 
 extern const struct ethtool_ops dpaa2_switch_port_ethtool_ops;
@@ -101,6 +104,34 @@ struct dpaa2_switch_fdb {
        bool                    in_use;
 };
 
+struct dpaa2_switch_acl_entry {
+       struct list_head        list;
+       u16                     prio;
+       unsigned long           cookie;
+
+       struct dpsw_acl_entry_cfg cfg;
+       struct dpsw_acl_key     key;
+};
+
+struct dpaa2_switch_acl_tbl {
+       struct list_head        entries;
+       struct ethsw_core       *ethsw;
+       u64                     ports;
+
+       u16                     id;
+       u8                      num_rules;
+       bool                    in_use;
+};
+
+static inline bool
+dpaa2_switch_acl_tbl_is_full(struct dpaa2_switch_acl_tbl *acl_tbl)
+{
+       return (acl_tbl->num_rules + DPAA2_ETHSW_PORT_DEFAULT_TRAPS) >=
+              DPAA2_ETHSW_PORT_MAX_ACL_ENTRIES;
+}
+
 /* Per port private data */
 struct ethsw_port_priv {
        struct net_device       *netdev;
@@ -118,8 +149,7 @@ struct ethsw_port_priv {
        bool                    ucast_flood;
        bool                    learn_ena;
 
-       u16                     acl_tbl;
-       u8                      acl_num_rules;
+       struct dpaa2_switch_acl_tbl *acl_tbl;
 };
 
 /* Switch data */
@@ -145,8 +175,21 @@ struct ethsw_core {
        int                             napi_users;
 
        struct dpaa2_switch_fdb         *fdbs;
+       struct dpaa2_switch_acl_tbl     *acls;
 };
 
+static inline int dpaa2_switch_get_index(struct ethsw_core *ethsw,
+                                        struct net_device *netdev)
+{
+       int i;
+
+       for (i = 0; i < ethsw->sw_attr.num_ifs; i++)
+               if (ethsw->ports[i]->netdev == netdev)
+                       return ethsw->ports[i]->idx;
+
+       return -EINVAL;
+}
+
 static inline bool dpaa2_switch_supports_cpu_traffic(struct ethsw_core *ethsw)
 {
        if (ethsw->sw_attr.options & DPSW_OPT_CTRL_IF_DIS) {
@@ -183,4 +226,21 @@ int dpaa2_switch_port_vlans_del(struct net_device *netdev,
 typedef int dpaa2_switch_fdb_cb_t(struct ethsw_port_priv *port_priv,
                                  struct fdb_dump_entry *fdb_entry,
                                  void *data);
+
+/* TC offload */
+
+int dpaa2_switch_cls_flower_replace(struct dpaa2_switch_acl_tbl *acl_tbl,
+                                   struct flow_cls_offload *cls);
+
+int dpaa2_switch_cls_flower_destroy(struct dpaa2_switch_acl_tbl *acl_tbl,
+                                   struct flow_cls_offload *cls);
+
+int dpaa2_switch_cls_matchall_replace(struct dpaa2_switch_acl_tbl *acl_tbl,
+                                     struct tc_cls_matchall_offload *cls);
+
+int dpaa2_switch_cls_matchall_destroy(struct dpaa2_switch_acl_tbl *acl_tbl,
+                                     struct tc_cls_matchall_offload *cls);
+
+int dpaa2_switch_acl_entry_add(struct dpaa2_switch_acl_tbl *acl_tbl,
+                              struct dpaa2_switch_acl_entry *entry);
 #endif /* __ETHSW_H */
index 1747cee..cb13e74 100644 (file)
@@ -77,6 +77,7 @@
 #define DPSW_CMDID_ACL_ADD                  DPSW_CMD_ID(0x090)
 #define DPSW_CMDID_ACL_REMOVE               DPSW_CMD_ID(0x091)
 #define DPSW_CMDID_ACL_ADD_ENTRY            DPSW_CMD_ID(0x092)
+#define DPSW_CMDID_ACL_REMOVE_ENTRY         DPSW_CMD_ID(0x093)
 #define DPSW_CMDID_ACL_ADD_IF               DPSW_CMD_ID(0x094)
 #define DPSW_CMDID_ACL_REMOVE_IF            DPSW_CMD_ID(0x095)
 
index 6704efe..6352d6d 100644 (file)
@@ -1544,3 +1544,38 @@ int dpsw_acl_add_entry(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
 
        return mc_send_command(mc_io, &cmd);
 }
+
+/**
+ * dpsw_acl_remove_entry() - Removes an entry from ACL.
+ * @mc_io:     Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:     Token of DPSW object
+ * @acl_id:    ACL ID
+ * @cfg:       Entry configuration
+ *
+ * warning: This function has to be called after dpsw_acl_prepare_entry_cfg()
+ *
+ * Return:     '0' on Success; Error code otherwise.
+ */
+int dpsw_acl_remove_entry(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+                         u16 acl_id, const struct dpsw_acl_entry_cfg *cfg)
+{
+       struct dpsw_cmd_acl_entry *cmd_params;
+       struct fsl_mc_command cmd = { 0 };
+
+       /* prepare command */
+       cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_REMOVE_ENTRY,
+                                         cmd_flags,
+                                         token);
+       cmd_params = (struct dpsw_cmd_acl_entry *)cmd.params;
+       cmd_params->acl_id = cpu_to_le16(acl_id);
+       cmd_params->result_if_id = cpu_to_le16(cfg->result.if_id);
+       cmd_params->precedence = cpu_to_le32(cfg->precedence);
+       cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
+       dpsw_set_field(cmd_params->result_action,
+                      RESULT_ACTION,
+                      cfg->result.action);
+
+       /* send command to MC */
+       return mc_send_command(mc_io, &cmd);
+}
index 08e37c4..5ef221a 100644 (file)
@@ -749,4 +749,7 @@ void dpsw_acl_prepare_entry_cfg(const struct dpsw_acl_key *key,
 
 int dpsw_acl_add_entry(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
                       u16 acl_id, const struct dpsw_acl_entry_cfg *cfg);
+
+int dpsw_acl_remove_entry(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+                         u16 acl_id, const struct dpsw_acl_entry_cfg *cfg);
 #endif /* __FSL_DPSW_H */
index f61fedf..30b22b7 100644 (file)
@@ -390,23 +390,54 @@ static int enetc_pf_set_vf_spoofchk(struct net_device *ndev, int vf, bool en)
        return 0;
 }
 
-static void enetc_port_setup_primary_mac_address(struct enetc_si *si)
+static int enetc_setup_mac_address(struct device_node *np, struct enetc_pf *pf,
+                                  int si)
 {
-       unsigned char mac_addr[MAX_ADDR_LEN];
-       struct enetc_pf *pf = enetc_si_priv(si);
-       struct enetc_hw *hw = &si->hw;
-       int i;
+       struct device *dev = &pf->si->pdev->dev;
+       struct enetc_hw *hw = &pf->si->hw;
+       u8 mac_addr[ETH_ALEN] = { 0 };
+       int err;
 
-       /* check MAC addresses for PF and all VFs, if any is 0 set it ro rand */
-       for (i = 0; i < pf->total_vfs + 1; i++) {
-               enetc_pf_get_primary_mac_addr(hw, i, mac_addr);
-               if (!is_zero_ether_addr(mac_addr))
-                       continue;
+       /* (1) try to get the MAC address from the device tree */
+       if (np) {
+               err = of_get_mac_address(np, mac_addr);
+               if (err == -EPROBE_DEFER)
+                       return err;
+       }
+
+       /* (2) bootloader supplied MAC address */
+       if (is_zero_ether_addr(mac_addr))
+               enetc_pf_get_primary_mac_addr(hw, si, mac_addr);
+
+       /* (3) choose a random one */
+       if (is_zero_ether_addr(mac_addr)) {
                eth_random_addr(mac_addr);
-               dev_info(&si->pdev->dev, "no MAC address specified for SI%d, using %pM\n",
-                        i, mac_addr);
-               enetc_pf_set_primary_mac_addr(hw, i, mac_addr);
+               dev_info(dev, "no MAC address specified for SI%d, using %pM\n",
+                        si, mac_addr);
        }
+
+       enetc_pf_set_primary_mac_addr(hw, si, mac_addr);
+
+       return 0;
+}
+
+static int enetc_setup_mac_addresses(struct device_node *np,
+                                    struct enetc_pf *pf)
+{
+       int err, i;
+
+       /* The PF might take its MAC from the device tree */
+       err = enetc_setup_mac_address(np, pf, 0);
+       if (err)
+               return err;
+
+       for (i = 0; i < pf->total_vfs; i++) {
+               err = enetc_setup_mac_address(NULL, pf, i + 1);
+               if (err)
+                       return err;
+       }
+
+       return 0;
 }
 
 static void enetc_port_assign_rfs_entries(struct enetc_si *si)
@@ -562,9 +593,6 @@ static void enetc_configure_port(struct enetc_pf *pf)
        /* split up RFS entries */
        enetc_port_assign_rfs_entries(pf->si);
 
-       /* fix-up primary MAC addresses, if not set already */
-       enetc_port_setup_primary_mac_address(pf->si);
-
        /* enforce VLAN promisc mode for all SIs */
        pf->vlan_promisc_simap = ENETC_VLAN_PROMISC_MAP_ALL;
        enetc_set_vlan_promisc(hw, pf->vlan_promisc_simap);
@@ -1137,6 +1165,10 @@ static int enetc_pf_probe(struct pci_dev *pdev,
        pf->si = si;
        pf->total_vfs = pci_sriov_get_totalvfs(pdev);
 
+       err = enetc_setup_mac_addresses(node, pf);
+       if (err)
+               goto err_setup_mac_addresses;
+
        enetc_configure_port(pf);
 
        enetc_get_si_caps(si);
@@ -1204,6 +1236,7 @@ err_alloc_netdev:
 err_init_port_rss:
 err_init_port_rfs:
 err_device_disabled:
+err_setup_mac_addresses:
        enetc_teardown_cbdr(&si->cbd_ring);
 err_setup_cbdr:
 err_map_pf_space:
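
enetc_setup_mac_address() tries the sources in the order shown: (1) the device tree, (2) the bootloader-programmed registers, (3) a random address. A hedged device-tree sketch of step (1) follows (node name and address value are illustrative only):

    ethernet@0,0 {
            /* consumed by of_get_mac_address() before any fallback */
            mac-address = [00 04 9f 01 02 03];
    };
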
index c00332d..72e6ebf 100644 (file)
@@ -361,7 +361,7 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
        }
 
 #ifdef IXGBE_FCOE
-       /* Reprogam FCoE hardware offloads when the traffic class
+       /* Reprogram FCoE hardware offloads when the traffic class
         * FCoE is using changes. This happens if the APP info
         * changes or the up2tc mapping is updated.
         */
index 73bc170..24aa97f 100644 (file)
@@ -380,6 +380,9 @@ static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id)
        case X557_PHY_ID2:
                phy_type = ixgbe_phy_x550em_ext_t;
                break;
+       case BCM54616S_E_PHY_ID:
+               phy_type = ixgbe_phy_ext_1g_t;
+               break;
        default:
                phy_type = ixgbe_phy_unknown;
                break;
index 2be1c4c..2647937 100644 (file)
@@ -1407,6 +1407,7 @@ struct ixgbe_nvm_version {
 #define QT2022_PHY_ID    0x0043A400
 #define ATH_PHY_ID       0x03429050
 #define AQ_FW_REV        0x20
+#define BCM54616S_E_PHY_ID 0x03625D10
 
 /* Special PHY Init Routine */
 #define IXGBE_PHY_INIT_OFFSET_NL 0x002B
@@ -3383,10 +3384,6 @@ struct ixgbe_hw_stats {
 /* forward declaration */
 struct ixgbe_hw;
 
-/* iterator type for walking multicast address lists */
-typedef u8* (*ixgbe_mc_addr_itr) (struct ixgbe_hw *hw, u8 **mc_addr_ptr,
-                                 u32 *vmdq);
-
 /* Function pointer table */
 struct ixgbe_eeprom_operations {
        s32 (*init_params)(struct ixgbe_hw *);
index d1e9e30..1d8209d 100644 (file)
@@ -16,9 +16,6 @@
 
 struct ixgbe_hw;
 
-/* iterator type for walking multicast address lists */
-typedef u8* (*ixgbe_mc_addr_itr) (struct ixgbe_hw *hw, u8 **mc_addr_ptr,
-                                 u32 *vmdq);
 struct ixgbe_mac_operations {
        s32 (*init_hw)(struct ixgbe_hw *);
        s32 (*reset_hw)(struct ixgbe_hw *);
index 8bde583..a1223e9 100644 (file)
@@ -50,7 +50,7 @@ mlx5_core-$(CONFIG_MLX5_TC_CT)             += en/tc_ct.o
 # Core extra
 #
 mlx5_core-$(CONFIG_MLX5_ESWITCH)   += eswitch.o eswitch_offloads.o eswitch_offloads_termtbl.o \
-                                     ecpf.o rdma.o
+                                     ecpf.o rdma.o esw/legacy.o
 mlx5_core-$(CONFIG_MLX5_ESWITCH)   += esw/acl/helper.o \
                                      esw/acl/egress_lgcy.o esw/acl/egress_ofld.o \
                                      esw/acl/ingress_lgcy.o esw/acl/ingress_ofld.o \
index 38c7c44..ee0f535 100644 (file)
@@ -456,6 +456,50 @@ static int mlx5_devlink_large_group_num_validate(struct devlink *devlink, u32 id
 
        return 0;
 }
+
+static int mlx5_devlink_esw_port_metadata_set(struct devlink *devlink, u32 id,
+                                             struct devlink_param_gset_ctx *ctx)
+{
+       struct mlx5_core_dev *dev = devlink_priv(devlink);
+
+       if (!MLX5_ESWITCH_MANAGER(dev))
+               return -EOPNOTSUPP;
+
+       return mlx5_esw_offloads_vport_metadata_set(dev->priv.eswitch, ctx->val.vbool);
+}
+
+static int mlx5_devlink_esw_port_metadata_get(struct devlink *devlink, u32 id,
+                                             struct devlink_param_gset_ctx *ctx)
+{
+       struct mlx5_core_dev *dev = devlink_priv(devlink);
+
+       if (!MLX5_ESWITCH_MANAGER(dev))
+               return -EOPNOTSUPP;
+
+       ctx->val.vbool = mlx5_eswitch_vport_match_metadata_enabled(dev->priv.eswitch);
+       return 0;
+}
+
+static int mlx5_devlink_esw_port_metadata_validate(struct devlink *devlink, u32 id,
+                                                  union devlink_param_value val,
+                                                  struct netlink_ext_ack *extack)
+{
+       struct mlx5_core_dev *dev = devlink_priv(devlink);
+       u8 esw_mode;
+
+       if (!MLX5_ESWITCH_MANAGER(dev)) {
+               NL_SET_ERR_MSG_MOD(extack, "E-Switch is unsupported");
+               return -EOPNOTSUPP;
+       }
+       esw_mode = mlx5_eswitch_mode(dev);
+       if (esw_mode == MLX5_ESWITCH_OFFLOADS) {
+               NL_SET_ERR_MSG_MOD(extack,
+                                  "E-Switch must either disabled or non switchdev mode");
+               return -EBUSY;
+       }
+       return 0;
+}
+
 #endif
 
 static int mlx5_devlink_enable_remote_dev_reset_set(struct devlink *devlink, u32 id,
@@ -490,6 +534,12 @@ static const struct devlink_param mlx5_devlink_params[] = {
                             BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
                             NULL, NULL,
                             mlx5_devlink_large_group_num_validate),
+       DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_ID_ESW_PORT_METADATA,
+                            "esw_port_metadata", DEVLINK_PARAM_TYPE_BOOL,
+                            BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+                            mlx5_devlink_esw_port_metadata_get,
+                            mlx5_devlink_esw_port_metadata_set,
+                            mlx5_devlink_esw_port_metadata_validate),
 #endif
        DEVLINK_PARAM_GENERIC(ENABLE_REMOTE_DEV_RESET, BIT(DEVLINK_PARAM_CMODE_RUNTIME),
                              mlx5_devlink_enable_remote_dev_reset_get,
@@ -519,6 +569,18 @@ static void mlx5_devlink_set_params_init_values(struct devlink *devlink)
        devlink_param_driverinit_value_set(devlink,
                                           MLX5_DEVLINK_PARAM_ID_ESW_LARGE_GROUP_NUM,
                                           value);
+
+       if (MLX5_ESWITCH_MANAGER(dev)) {
+               if (mlx5_esw_vport_match_metadata_supported(dev->priv.eswitch)) {
+                       dev->priv.eswitch->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;
+                       value.vbool = true;
+               } else {
+                       value.vbool = false;
+               }
+               devlink_param_driverinit_value_set(devlink,
+                                                  MLX5_DEVLINK_PARAM_ID_ESW_PORT_METADATA,
+                                                  value);
+       }
 #endif
 }
 
index eff107d..7318d44 100644 (file)
@@ -10,6 +10,7 @@ enum mlx5_devlink_param_id {
        MLX5_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
        MLX5_DEVLINK_PARAM_ID_FLOW_STEERING_MODE,
        MLX5_DEVLINK_PARAM_ID_ESW_LARGE_GROUP_NUM,
+       MLX5_DEVLINK_PARAM_ID_ESW_PORT_METADATA,
 };
 
 struct mlx5_trap_ctx {
index d5b1eb7..5cd466e 100644 (file)
@@ -392,11 +392,11 @@ static void arfs_may_expire_flow(struct mlx5e_priv *priv)
 {
        struct arfs_rule *arfs_rule;
        struct hlist_node *htmp;
+       HLIST_HEAD(del_list);
        int quota = 0;
        int i;
        int j;
 
-       HLIST_HEAD(del_list);
        spin_lock_bh(&priv->fs.arfs->arfs_lock);
        mlx5e_for_each_arfs_rule(arfs_rule, htmp, priv->fs.arfs->arfs_tables, i, j) {
                if (!work_pending(&arfs_rule->arfs_work) &&
@@ -422,10 +422,10 @@ static void arfs_del_rules(struct mlx5e_priv *priv)
 {
        struct hlist_node *htmp;
        struct arfs_rule *rule;
+       HLIST_HEAD(del_list);
        int i;
        int j;
 
-       HLIST_HEAD(del_list);
        spin_lock_bh(&priv->fs.arfs->arfs_lock);
        mlx5e_for_each_arfs_rule(rule, htmp, priv->fs.arfs->arfs_tables, i, j) {
                hlist_del_init(&rule->hlist);
index 2f47608..6847e7b 100644 (file)
@@ -510,8 +510,9 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
                        rq->page_pool = NULL;
                        goto err_free_by_rq_type;
                }
-               err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
-                                                MEM_TYPE_PAGE_POOL, rq->page_pool);
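+               /* Attach the page-pool memory model only when the rxq info
+                * above was actually registered for this rq type.
+                */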
+               if (xdp_rxq_info_is_reg(&rq->xdp_rxq))
+                       err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
+                                                        MEM_TYPE_PAGE_POOL, rq->page_pool);
        }
        if (err)
                goto err_free_by_rq_type;
index e58ef8c..34eb111 100644 (file)
@@ -52,7 +52,7 @@
 #include "diag/en_rep_tracepoint.h"
 
 #define MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE \
-        max(0x7, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)
+       max(0x7, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)
 #define MLX5E_REP_PARAMS_DEF_NUM_CHANNELS 1
 
 static const char mlx5e_rep_driver_name[] = "mlx5e_rep";
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c
new file mode 100644 (file)
index 0000000..8ab1224
--- /dev/null
@@ -0,0 +1,509 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2021 Mellanox Technologies Ltd */
+
+#include <linux/etherdevice.h>
+#include <linux/mlx5/driver.h>
+#include <linux/mlx5/mlx5_ifc.h>
+#include <linux/mlx5/vport.h>
+#include <linux/mlx5/fs.h>
+#include "esw/acl/lgcy.h"
+#include "esw/legacy.h"
+#include "mlx5_core.h"
+#include "eswitch.h"
+#include "fs_core.h"
+
+enum {
+       LEGACY_VEPA_PRIO = 0,
+       LEGACY_FDB_PRIO,
+};
+
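+/* The VEPA table lives at a lower prio value (higher precedence) than the
+ * legacy FDB, so its rules are matched first when VEPA is enabled.
+ */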
+static int esw_create_legacy_vepa_table(struct mlx5_eswitch *esw)
+{
+       struct mlx5_flow_table_attr ft_attr = {};
+       struct mlx5_core_dev *dev = esw->dev;
+       struct mlx5_flow_namespace *root_ns;
+       struct mlx5_flow_table *fdb;
+       int err;
+
+       root_ns = mlx5_get_fdb_sub_ns(dev, 0);
+       if (!root_ns) {
+               esw_warn(dev, "Failed to get FDB flow namespace\n");
+               return -EOPNOTSUPP;
+       }
+
+       /* num FTE 2, num FG 2 */
+       ft_attr.prio = LEGACY_VEPA_PRIO;
+       ft_attr.max_fte = 2;
+       ft_attr.autogroup.max_num_groups = 2;
+       fdb = mlx5_create_auto_grouped_flow_table(root_ns, &ft_attr);
+       if (IS_ERR(fdb)) {
+               err = PTR_ERR(fdb);
+               esw_warn(dev, "Failed to create VEPA FDB err %d\n", err);
+               return err;
+       }
+       esw->fdb_table.legacy.vepa_fdb = fdb;
+
+       return 0;
+}
+
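+/* Flow groups must be destroyed before the flow table that holds them. */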
+static void esw_destroy_legacy_fdb_table(struct mlx5_eswitch *esw)
+{
+       esw_debug(esw->dev, "Destroy FDB Table\n");
+       if (!esw->fdb_table.legacy.fdb)
+               return;
+
+       if (esw->fdb_table.legacy.promisc_grp)
+               mlx5_destroy_flow_group(esw->fdb_table.legacy.promisc_grp);
+       if (esw->fdb_table.legacy.allmulti_grp)
+               mlx5_destroy_flow_group(esw->fdb_table.legacy.allmulti_grp);
+       if (esw->fdb_table.legacy.addr_grp)
+               mlx5_destroy_flow_group(esw->fdb_table.legacy.addr_grp);
+       mlx5_destroy_flow_table(esw->fdb_table.legacy.fdb);
+
+       esw->fdb_table.legacy.fdb = NULL;
+       esw->fdb_table.legacy.addr_grp = NULL;
+       esw->fdb_table.legacy.allmulti_grp = NULL;
+       esw->fdb_table.legacy.promisc_grp = NULL;
+       atomic64_set(&esw->user_count, 0);
+}
+
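+/* Legacy FDB layout: a full-match DMAC group covering entries
+ * [0, table_size - 3], one allmulti entry at table_size - 2 and one
+ * promisc entry at table_size - 1.
+ */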
+static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw)
+{
+       int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+       struct mlx5_flow_table_attr ft_attr = {};
+       struct mlx5_core_dev *dev = esw->dev;
+       struct mlx5_flow_namespace *root_ns;
+       struct mlx5_flow_table *fdb;
+       struct mlx5_flow_group *g;
+       void *match_criteria;
+       int table_size;
+       u32 *flow_group_in;
+       u8 *dmac;
+       int err = 0;
+
+       esw_debug(dev, "Create FDB log_max_size(%d)\n",
+                 MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
+
+       root_ns = mlx5_get_fdb_sub_ns(dev, 0);
+       if (!root_ns) {
+               esw_warn(dev, "Failed to get FDB flow namespace\n");
+               return -EOPNOTSUPP;
+       }
+
+       flow_group_in = kvzalloc(inlen, GFP_KERNEL);
+       if (!flow_group_in)
+               return -ENOMEM;
+
+       table_size = BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
+       ft_attr.max_fte = table_size;
+       ft_attr.prio = LEGACY_FDB_PRIO;
+       fdb = mlx5_create_flow_table(root_ns, &ft_attr);
+       if (IS_ERR(fdb)) {
+               err = PTR_ERR(fdb);
+               esw_warn(dev, "Failed to create FDB Table err %d\n", err);
+               goto out;
+       }
+       esw->fdb_table.legacy.fdb = fdb;
+
+       /* Addresses group: full match on unicast/multicast addresses */
+       MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
+                MLX5_MATCH_OUTER_HEADERS);
+       match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
+       dmac = MLX5_ADDR_OF(fte_match_param, match_criteria, outer_headers.dmac_47_16);
+       MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
+       /* Reserve 2 entries for the allmulti and promisc rules */
+       MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 3);
+       eth_broadcast_addr(dmac);
+       g = mlx5_create_flow_group(fdb, flow_group_in);
+       if (IS_ERR(g)) {
+               err = PTR_ERR(g);
+               esw_warn(dev, "Failed to create flow group err(%d)\n", err);
+               goto out;
+       }
+       esw->fdb_table.legacy.addr_grp = g;
+
+       /* Allmulti group: one rule that forwards any mcast traffic */
+       MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
+                MLX5_MATCH_OUTER_HEADERS);
+       MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 2);
+       MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 2);
+       eth_zero_addr(dmac);
+       dmac[0] = 0x01;
+       g = mlx5_create_flow_group(fdb, flow_group_in);
+       if (IS_ERR(g)) {
+               err = PTR_ERR(g);
+               esw_warn(dev, "Failed to create allmulti flow group err(%d)\n", err);
+               goto out;
+       }
+       esw->fdb_table.legacy.allmulti_grp = g;
+
+       /* Promiscuous group:
+        * One rule that forwards all traffic unmatched by the previous groups
+        */
+       eth_zero_addr(dmac);
+       MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
+                MLX5_MATCH_MISC_PARAMETERS);
+       MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);
+       MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 1);
+       MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 1);
+       g = mlx5_create_flow_group(fdb, flow_group_in);
+       if (IS_ERR(g)) {
+               err = PTR_ERR(g);
+               esw_warn(dev, "Failed to create promisc flow group err(%d)\n", err);
+               goto out;
+       }
+       esw->fdb_table.legacy.promisc_grp = g;
+
+out:
+       if (err)
+               esw_destroy_legacy_fdb_table(esw);
+
+       kvfree(flow_group_in);
+       return err;
+}
+
+static void esw_destroy_legacy_vepa_table(struct mlx5_eswitch *esw)
+{
+       esw_debug(esw->dev, "Destroy VEPA Table\n");
+       if (!esw->fdb_table.legacy.vepa_fdb)
+               return;
+
+       mlx5_destroy_flow_table(esw->fdb_table.legacy.vepa_fdb);
+       esw->fdb_table.legacy.vepa_fdb = NULL;
+}
+
+static int esw_create_legacy_table(struct mlx5_eswitch *esw)
+{
+       int err;
+
+       memset(&esw->fdb_table.legacy, 0, sizeof(struct legacy_fdb));
+       atomic64_set(&esw->user_count, 0);
+
+       err = esw_create_legacy_vepa_table(esw);
+       if (err)
+               return err;
+
+       err = esw_create_legacy_fdb_table(esw);
+       if (err)
+               esw_destroy_legacy_vepa_table(esw);
+
+       return err;
+}
+
+static void esw_cleanup_vepa_rules(struct mlx5_eswitch *esw)
+{
+       if (esw->fdb_table.legacy.vepa_uplink_rule)
+               mlx5_del_flow_rules(esw->fdb_table.legacy.vepa_uplink_rule);
+
+       if (esw->fdb_table.legacy.vepa_star_rule)
+               mlx5_del_flow_rules(esw->fdb_table.legacy.vepa_star_rule);
+
+       esw->fdb_table.legacy.vepa_uplink_rule = NULL;
+       esw->fdb_table.legacy.vepa_star_rule = NULL;
+}
+
+static void esw_destroy_legacy_table(struct mlx5_eswitch *esw)
+{
+       esw_cleanup_vepa_rules(esw);
+       esw_destroy_legacy_fdb_table(esw);
+       esw_destroy_legacy_vepa_table(esw);
+}
+
+#define MLX5_LEGACY_SRIOV_VPORT_EVENTS (MLX5_VPORT_UC_ADDR_CHANGE | \
+                                       MLX5_VPORT_MC_ADDR_CHANGE | \
+                                       MLX5_VPORT_PROMISC_CHANGE)
+
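+/* Bring up legacy SR-IOV: create the VEPA and FDB tables, default every VF
+ * link state to AUTO, then enable PF/VF vports for address/promisc events.
+ */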
+int esw_legacy_enable(struct mlx5_eswitch *esw)
+{
+       struct mlx5_vport *vport;
+       int ret, i;
+
+       ret = esw_create_legacy_table(esw);
+       if (ret)
+               return ret;
+
+       mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
+               vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO;
+
+       ret = mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_LEGACY_SRIOV_VPORT_EVENTS);
+       if (ret)
+               esw_destroy_legacy_table(esw);
+       return ret;
+}
+
+void esw_legacy_disable(struct mlx5_eswitch *esw)
+{
+       struct esw_mc_addr *mc_promisc;
+
+       mlx5_eswitch_disable_pf_vf_vports(esw);
+
+       mc_promisc = &esw->mc_promisc;
+       if (mc_promisc->uplink_rule)
+               mlx5_del_flow_rules(mc_promisc->uplink_rule);
+
+       esw_destroy_legacy_table(esw);
+}
+
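+/* VEPA mode: steer uplink ingress into the legacy FDB and everything else
+ * back out of the uplink, so VM-to-VM traffic is reflected by the external
+ * switch.
+ */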
+static int _mlx5_eswitch_set_vepa_locked(struct mlx5_eswitch *esw,
+                                        u8 setting)
+{
+       struct mlx5_flow_destination dest = {};
+       struct mlx5_flow_act flow_act = {};
+       struct mlx5_flow_handle *flow_rule;
+       struct mlx5_flow_spec *spec;
+       int err = 0;
+       void *misc;
+
+       if (!setting) {
+               esw_cleanup_vepa_rules(esw);
+               return 0;
+       }
+
+       if (esw->fdb_table.legacy.vepa_uplink_rule)
+               return 0;
+
+       spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+       if (!spec)
+               return -ENOMEM;
+
+       /* Uplink rule forwards uplink traffic to the FDB */
+       misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
+       MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_UPLINK);
+
+       misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
+       MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
+
+       spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
+       dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+       dest.ft = esw->fdb_table.legacy.fdb;
+       flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+       flow_rule = mlx5_add_flow_rules(esw->fdb_table.legacy.vepa_fdb, spec,
+                                       &flow_act, &dest, 1);
+       if (IS_ERR(flow_rule)) {
+               err = PTR_ERR(flow_rule);
+               goto out;
+       } else {
+               esw->fdb_table.legacy.vepa_uplink_rule = flow_rule;
+       }
+
+       /* Star rule to forward all traffic to uplink vport */
+       memset(&dest, 0, sizeof(dest));
+       dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
+       dest.vport.num = MLX5_VPORT_UPLINK;
+       flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+       flow_rule = mlx5_add_flow_rules(esw->fdb_table.legacy.vepa_fdb, NULL,
+                                       &flow_act, &dest, 1);
+       if (IS_ERR(flow_rule)) {
+               err = PTR_ERR(flow_rule);
+               goto out;
+       } else {
+               esw->fdb_table.legacy.vepa_star_rule = flow_rule;
+       }
+
+out:
+       kvfree(spec);
+       if (err)
+               esw_cleanup_vepa_rules(esw);
+       return err;
+}
+
+int mlx5_eswitch_set_vepa(struct mlx5_eswitch *esw, u8 setting)
+{
+       int err = 0;
+
+       if (!esw)
+               return -EOPNOTSUPP;
+
+       if (!mlx5_esw_allowed(esw))
+               return -EPERM;
+
+       mutex_lock(&esw->state_lock);
+       if (esw->mode != MLX5_ESWITCH_LEGACY) {
+               err = -EOPNOTSUPP;
+               goto out;
+       }
+
+       err = _mlx5_eswitch_set_vepa_locked(esw, setting);
+
+out:
+       mutex_unlock(&esw->state_lock);
+       return err;
+}
+
+int mlx5_eswitch_get_vepa(struct mlx5_eswitch *esw, u8 *setting)
+{
+       if (!esw)
+               return -EOPNOTSUPP;
+
+       if (!mlx5_esw_allowed(esw))
+               return -EPERM;
+
+       if (esw->mode != MLX5_ESWITCH_LEGACY)
+               return -EOPNOTSUPP;
+
+       *setting = esw->fdb_table.legacy.vepa_uplink_rule ? 1 : 0;
+       return 0;
+}
+
+int esw_legacy_vport_acl_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
+{
+       int ret;
+
+       /* Only non-manager vports need ACLs in legacy mode */
+       if (mlx5_esw_is_manager_vport(esw, vport->vport))
+               return 0;
+
+       ret = esw_acl_ingress_lgcy_setup(esw, vport);
+       if (ret)
+               goto ingress_err;
+
+       ret = esw_acl_egress_lgcy_setup(esw, vport);
+       if (ret)
+               goto egress_err;
+
+       return 0;
+
+egress_err:
+       esw_acl_ingress_lgcy_cleanup(esw, vport);
+ingress_err:
+       return ret;
+}
+
+void esw_legacy_vport_acl_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
+{
+       if (mlx5_esw_is_manager_vport(esw, vport->vport))
+               return;
+
+       esw_acl_egress_lgcy_cleanup(esw, vport);
+       esw_acl_ingress_lgcy_cleanup(esw, vport);
+}
+
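+/* Combine the SW ACL drop counters with the FW per-vport discard counters
+ * when the receive/transmit_discard_vport_down caps are advertised.
+ */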
+int mlx5_esw_query_vport_drop_stats(struct mlx5_core_dev *dev,
+                                   struct mlx5_vport *vport,
+                                   struct mlx5_vport_drop_stats *stats)
+{
+       u64 rx_discard_vport_down, tx_discard_vport_down;
+       struct mlx5_eswitch *esw = dev->priv.eswitch;
+       u64 bytes = 0;
+       int err = 0;
+
+       if (esw->mode != MLX5_ESWITCH_LEGACY)
+               return 0;
+
+       mutex_lock(&esw->state_lock);
+       if (!vport->enabled)
+               goto unlock;
+
+       if (!IS_ERR_OR_NULL(vport->egress.legacy.drop_counter))
+               mlx5_fc_query(dev, vport->egress.legacy.drop_counter,
+                             &stats->rx_dropped, &bytes);
+
+       if (vport->ingress.legacy.drop_counter)
+               mlx5_fc_query(dev, vport->ingress.legacy.drop_counter,
+                             &stats->tx_dropped, &bytes);
+
+       if (!MLX5_CAP_GEN(dev, receive_discard_vport_down) &&
+           !MLX5_CAP_GEN(dev, transmit_discard_vport_down))
+               goto unlock;
+
+       err = mlx5_query_vport_down_stats(dev, vport->vport, 1,
+                                         &rx_discard_vport_down,
+                                         &tx_discard_vport_down);
+       if (err)
+               goto unlock;
+
+       if (MLX5_CAP_GEN(dev, receive_discard_vport_down))
+               stats->rx_dropped += rx_discard_vport_down;
+       if (MLX5_CAP_GEN(dev, transmit_discard_vport_down))
+               stats->tx_dropped += tx_discard_vport_down;
+
+unlock:
+       mutex_unlock(&esw->state_lock);
+       return err;
+}
+
+int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
+                               u16 vport, u16 vlan, u8 qos)
+{
+       u8 set_flags = 0;
+       int err = 0;
+
+       if (!mlx5_esw_allowed(esw))
+               return -EPERM;
+
+       if (vlan || qos)
+               set_flags = SET_VLAN_STRIP | SET_VLAN_INSERT;
+
+       mutex_lock(&esw->state_lock);
+       if (esw->mode != MLX5_ESWITCH_LEGACY) {
+               if (!vlan)
+                       goto unlock; /* compatibility with libvirt */
+
+               err = -EOPNOTSUPP;
+               goto unlock;
+       }
+
+       err = __mlx5_eswitch_set_vport_vlan(esw, vport, vlan, qos, set_flags);
+
+unlock:
+       mutex_unlock(&esw->state_lock);
+       return err;
+}
+
+int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
+                                   u16 vport, bool spoofchk)
+{
+       struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
+       bool pschk;
+       int err = 0;
+
+       if (!mlx5_esw_allowed(esw))
+               return -EPERM;
+       if (IS_ERR(evport))
+               return PTR_ERR(evport);
+
+       mutex_lock(&esw->state_lock);
+       if (esw->mode != MLX5_ESWITCH_LEGACY) {
+               err = -EOPNOTSUPP;
+               goto unlock;
+       }
+       pschk = evport->info.spoofchk;
+       evport->info.spoofchk = spoofchk;
+       if (pschk && !is_valid_ether_addr(evport->info.mac))
+               mlx5_core_warn(esw->dev,
+                              "Spoofchk is set while MAC is invalid, vport(%d)\n",
+                              evport->vport);
+       if (evport->enabled && esw->mode == MLX5_ESWITCH_LEGACY)
+               err = esw_acl_ingress_lgcy_setup(esw, evport);
+       if (err)
+               evport->info.spoofchk = pschk;
+
+unlock:
+       mutex_unlock(&esw->state_lock);
+       return err;
+}
+
+int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw,
+                                u16 vport, bool setting)
+{
+       struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
+       int err = 0;
+
+       if (!mlx5_esw_allowed(esw))
+               return -EPERM;
+       if (IS_ERR(evport))
+               return PTR_ERR(evport);
+
+       mutex_lock(&esw->state_lock);
+       if (esw->mode != MLX5_ESWITCH_LEGACY) {
+               err = -EOPNOTSUPP;
+               goto unlock;
+       }
+       evport->info.trusted = setting;
+       if (evport->enabled)
+               esw_vport_change_handle_locked(evport);
+
+unlock:
+       mutex_unlock(&esw->state_lock);
+       return err;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.h
new file mode 100644 (file)
index 0000000..e0820bb
--- /dev/null
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2021 Mellanox Technologies Ltd */
+
+#ifndef __MLX5_ESW_LEGACY_H__
+#define __MLX5_ESW_LEGACY_H__
+
+#define MLX5_LEGACY_SRIOV_VPORT_EVENTS (MLX5_VPORT_UC_ADDR_CHANGE | \
+                                       MLX5_VPORT_MC_ADDR_CHANGE | \
+                                       MLX5_VPORT_PROMISC_CHANGE)
+
+struct mlx5_eswitch;
+
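+/* Legacy (non-switchdev) SR-IOV entry points, split out of eswitch.c. */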
+int esw_legacy_enable(struct mlx5_eswitch *esw);
+void esw_legacy_disable(struct mlx5_eswitch *esw);
+
+int esw_legacy_vport_acl_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
+void esw_legacy_vport_acl_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
+
+int mlx5_esw_query_vport_drop_stats(struct mlx5_core_dev *dev,
+                                   struct mlx5_vport *vport,
+                                   struct mlx5_vport_drop_stats *stats);
+#endif
index 6cf04a3..1bb229e 100644 (file)
@@ -36,6 +36,7 @@
 #include <linux/mlx5/vport.h>
 #include <linux/mlx5/fs.h>
 #include "esw/acl/lgcy.h"
+#include "esw/legacy.h"
 #include "mlx5_core.h"
 #include "lib/eq.h"
 #include "eswitch.h"
@@ -61,9 +62,6 @@ struct vport_addr {
        bool mc_promisc;
 };
 
-static void esw_destroy_legacy_fdb_table(struct mlx5_eswitch *esw);
-static void esw_cleanup_vepa_rules(struct mlx5_eswitch *esw);
-
 static int mlx5_eswitch_check(const struct mlx5_core_dev *dev)
 {
        if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
@@ -278,226 +276,6 @@ esw_fdb_set_vport_promisc_rule(struct mlx5_eswitch *esw, u16 vport)
        return __esw_fdb_set_vport_rule(esw, vport, true, mac_c, mac_v);
 }
 
-enum {
-       LEGACY_VEPA_PRIO = 0,
-       LEGACY_FDB_PRIO,
-};
-
-static int esw_create_legacy_vepa_table(struct mlx5_eswitch *esw)
-{
-       struct mlx5_flow_table_attr ft_attr = {};
-       struct mlx5_core_dev *dev = esw->dev;
-       struct mlx5_flow_namespace *root_ns;
-       struct mlx5_flow_table *fdb;
-       int err;
-
-       root_ns = mlx5_get_fdb_sub_ns(dev, 0);
-       if (!root_ns) {
-               esw_warn(dev, "Failed to get FDB flow namespace\n");
-               return -EOPNOTSUPP;
-       }
-
-       /* num FTE 2, num FG 2 */
-       ft_attr.prio = LEGACY_VEPA_PRIO;
-       ft_attr.max_fte = 2;
-       ft_attr.autogroup.max_num_groups = 2;
-       fdb = mlx5_create_auto_grouped_flow_table(root_ns, &ft_attr);
-       if (IS_ERR(fdb)) {
-               err = PTR_ERR(fdb);
-               esw_warn(dev, "Failed to create VEPA FDB err %d\n", err);
-               return err;
-       }
-       esw->fdb_table.legacy.vepa_fdb = fdb;
-
-       return 0;
-}
-
-static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw)
-{
-       int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
-       struct mlx5_flow_table_attr ft_attr = {};
-       struct mlx5_core_dev *dev = esw->dev;
-       struct mlx5_flow_namespace *root_ns;
-       struct mlx5_flow_table *fdb;
-       struct mlx5_flow_group *g;
-       void *match_criteria;
-       int table_size;
-       u32 *flow_group_in;
-       u8 *dmac;
-       int err = 0;
-
-       esw_debug(dev, "Create FDB log_max_size(%d)\n",
-                 MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
-
-       root_ns = mlx5_get_fdb_sub_ns(dev, 0);
-       if (!root_ns) {
-               esw_warn(dev, "Failed to get FDB flow namespace\n");
-               return -EOPNOTSUPP;
-       }
-
-       flow_group_in = kvzalloc(inlen, GFP_KERNEL);
-       if (!flow_group_in)
-               return -ENOMEM;
-
-       table_size = BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
-       ft_attr.max_fte = table_size;
-       ft_attr.prio = LEGACY_FDB_PRIO;
-       fdb = mlx5_create_flow_table(root_ns, &ft_attr);
-       if (IS_ERR(fdb)) {
-               err = PTR_ERR(fdb);
-               esw_warn(dev, "Failed to create FDB Table err %d\n", err);
-               goto out;
-       }
-       esw->fdb_table.legacy.fdb = fdb;
-
-       /* Addresses group : Full match unicast/multicast addresses */
-       MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
-                MLX5_MATCH_OUTER_HEADERS);
-       match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
-       dmac = MLX5_ADDR_OF(fte_match_param, match_criteria, outer_headers.dmac_47_16);
-       MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
-       /* Preserve 2 entries for allmulti and promisc rules*/
-       MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 3);
-       eth_broadcast_addr(dmac);
-       g = mlx5_create_flow_group(fdb, flow_group_in);
-       if (IS_ERR(g)) {
-               err = PTR_ERR(g);
-               esw_warn(dev, "Failed to create flow group err(%d)\n", err);
-               goto out;
-       }
-       esw->fdb_table.legacy.addr_grp = g;
-
-       /* Allmulti group : One rule that forwards any mcast traffic */
-       MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
-                MLX5_MATCH_OUTER_HEADERS);
-       MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 2);
-       MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 2);
-       eth_zero_addr(dmac);
-       dmac[0] = 0x01;
-       g = mlx5_create_flow_group(fdb, flow_group_in);
-       if (IS_ERR(g)) {
-               err = PTR_ERR(g);
-               esw_warn(dev, "Failed to create allmulti flow group err(%d)\n", err);
-               goto out;
-       }
-       esw->fdb_table.legacy.allmulti_grp = g;
-
-       /* Promiscuous group :
-        * One rule that forward all unmatched traffic from previous groups
-        */
-       eth_zero_addr(dmac);
-       MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
-                MLX5_MATCH_MISC_PARAMETERS);
-       MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);
-       MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 1);
-       MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 1);
-       g = mlx5_create_flow_group(fdb, flow_group_in);
-       if (IS_ERR(g)) {
-               err = PTR_ERR(g);
-               esw_warn(dev, "Failed to create promisc flow group err(%d)\n", err);
-               goto out;
-       }
-       esw->fdb_table.legacy.promisc_grp = g;
-
-out:
-       if (err)
-               esw_destroy_legacy_fdb_table(esw);
-
-       kvfree(flow_group_in);
-       return err;
-}
-
-static void esw_destroy_legacy_vepa_table(struct mlx5_eswitch *esw)
-{
-       esw_debug(esw->dev, "Destroy VEPA Table\n");
-       if (!esw->fdb_table.legacy.vepa_fdb)
-               return;
-
-       mlx5_destroy_flow_table(esw->fdb_table.legacy.vepa_fdb);
-       esw->fdb_table.legacy.vepa_fdb = NULL;
-}
-
-static void esw_destroy_legacy_fdb_table(struct mlx5_eswitch *esw)
-{
-       esw_debug(esw->dev, "Destroy FDB Table\n");
-       if (!esw->fdb_table.legacy.fdb)
-               return;
-
-       if (esw->fdb_table.legacy.promisc_grp)
-               mlx5_destroy_flow_group(esw->fdb_table.legacy.promisc_grp);
-       if (esw->fdb_table.legacy.allmulti_grp)
-               mlx5_destroy_flow_group(esw->fdb_table.legacy.allmulti_grp);
-       if (esw->fdb_table.legacy.addr_grp)
-               mlx5_destroy_flow_group(esw->fdb_table.legacy.addr_grp);
-       mlx5_destroy_flow_table(esw->fdb_table.legacy.fdb);
-
-       esw->fdb_table.legacy.fdb = NULL;
-       esw->fdb_table.legacy.addr_grp = NULL;
-       esw->fdb_table.legacy.allmulti_grp = NULL;
-       esw->fdb_table.legacy.promisc_grp = NULL;
-       atomic64_set(&esw->user_count, 0);
-}
-
-static int esw_create_legacy_table(struct mlx5_eswitch *esw)
-{
-       int err;
-
-       memset(&esw->fdb_table.legacy, 0, sizeof(struct legacy_fdb));
-       atomic64_set(&esw->user_count, 0);
-
-       err = esw_create_legacy_vepa_table(esw);
-       if (err)
-               return err;
-
-       err = esw_create_legacy_fdb_table(esw);
-       if (err)
-               esw_destroy_legacy_vepa_table(esw);
-
-       return err;
-}
-
-static void esw_destroy_legacy_table(struct mlx5_eswitch *esw)
-{
-       esw_cleanup_vepa_rules(esw);
-       esw_destroy_legacy_fdb_table(esw);
-       esw_destroy_legacy_vepa_table(esw);
-}
-
-#define MLX5_LEGACY_SRIOV_VPORT_EVENTS (MLX5_VPORT_UC_ADDR_CHANGE | \
-                                       MLX5_VPORT_MC_ADDR_CHANGE | \
-                                       MLX5_VPORT_PROMISC_CHANGE)
-
-static int esw_legacy_enable(struct mlx5_eswitch *esw)
-{
-       struct mlx5_vport *vport;
-       int ret, i;
-
-       ret = esw_create_legacy_table(esw);
-       if (ret)
-               return ret;
-
-       mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
-               vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO;
-
-       ret = mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_LEGACY_SRIOV_VPORT_EVENTS);
-       if (ret)
-               esw_destroy_legacy_table(esw);
-       return ret;
-}
-
-static void esw_legacy_disable(struct mlx5_eswitch *esw)
-{
-       struct esw_mc_addr *mc_promisc;
-
-       mlx5_eswitch_disable_pf_vf_vports(esw);
-
-       mc_promisc = &esw->mc_promisc;
-       if (mc_promisc->uplink_rule)
-               mlx5_del_flow_rules(mc_promisc->uplink_rule);
-
-       esw_destroy_legacy_table(esw);
-}
-
 /* E-Switch vport UC/MC lists management */
 typedef int (*vport_addr_action)(struct mlx5_eswitch *esw,
                                 struct vport_addr *vaddr);
@@ -919,7 +697,7 @@ static void esw_update_vport_rx_mode(struct mlx5_eswitch *esw,
                                (promisc_all || promisc_mc));
 }
 
-static void esw_vport_change_handle_locked(struct mlx5_vport *vport)
+void esw_vport_change_handle_locked(struct mlx5_vport *vport)
 {
        struct mlx5_core_dev *dev = vport->dev;
        struct mlx5_eswitch *esw = dev->priv.eswitch;
@@ -1170,56 +948,20 @@ static void node_guid_gen_from_mac(u64 *node_guid, const u8 *mac)
        ((u8 *)node_guid)[0] = mac[5];
 }
 
-static int esw_vport_create_legacy_acl_tables(struct mlx5_eswitch *esw,
-                                             struct mlx5_vport *vport)
-{
-       int ret;
-
-       /* Only non manager vports need ACL in legacy mode */
-       if (mlx5_esw_is_manager_vport(esw, vport->vport))
-               return 0;
-
-       ret = esw_acl_ingress_lgcy_setup(esw, vport);
-       if (ret)
-               goto ingress_err;
-
-       ret = esw_acl_egress_lgcy_setup(esw, vport);
-       if (ret)
-               goto egress_err;
-
-       return 0;
-
-egress_err:
-       esw_acl_ingress_lgcy_cleanup(esw, vport);
-ingress_err:
-       return ret;
-}
-
 static int esw_vport_setup_acl(struct mlx5_eswitch *esw,
                               struct mlx5_vport *vport)
 {
        if (esw->mode == MLX5_ESWITCH_LEGACY)
-               return esw_vport_create_legacy_acl_tables(esw, vport);
+               return esw_legacy_vport_acl_setup(esw, vport);
        else
                return esw_vport_create_offloads_acl_tables(esw, vport);
 }
 
-static void esw_vport_destroy_legacy_acl_tables(struct mlx5_eswitch *esw,
-                                               struct mlx5_vport *vport)
-
-{
-       if (mlx5_esw_is_manager_vport(esw, vport->vport))
-               return;
-
-       esw_acl_egress_lgcy_cleanup(esw, vport);
-       esw_acl_ingress_lgcy_cleanup(esw, vport);
-}
-
 static void esw_vport_cleanup_acl(struct mlx5_eswitch *esw,
                                  struct mlx5_vport *vport)
 {
        if (esw->mode == MLX5_ESWITCH_LEGACY)
-               esw_vport_destroy_legacy_acl_tables(esw, vport);
+               esw_legacy_vport_acl_cleanup(esw, vport);
        else
                esw_vport_destroy_offloads_acl_tables(esw, vport);
 }
@@ -1390,15 +1132,9 @@ const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
 {
        int outlen = MLX5_ST_SZ_BYTES(query_esw_functions_out);
        u32 in[MLX5_ST_SZ_DW(query_esw_functions_in)] = {};
-       u16 max_sf_vports;
        u32 *out;
        int err;
 
-       max_sf_vports = mlx5_sf_max_functions(dev);
-       /* Device interface is array of 64-bits */
-       if (max_sf_vports)
-               outlen += DIV_ROUND_UP(max_sf_vports, BITS_PER_TYPE(__be64)) * sizeof(__be64);
-
        out = kvzalloc(outlen, GFP_KERNEL);
        if (!out)
                return ERR_PTR(-ENOMEM);
@@ -1449,8 +1185,6 @@ static void mlx5_eswitch_clear_vf_vports_info(struct mlx5_eswitch *esw)
 }
 
 /* Public E-Switch API */
-#define ESW_ALLOWED(esw) ((esw) && MLX5_ESWITCH_MANAGER((esw)->dev))
-
 int mlx5_eswitch_load_vport(struct mlx5_eswitch *esw, u16 vport_num,
                            enum mlx5_eswitch_vport_event enabled_events)
 {
@@ -1633,6 +1367,47 @@ static void mlx5_esw_mode_change_notify(struct mlx5_eswitch *esw, u16 mode)
        blocking_notifier_call_chain(&esw->n_head, 0, &info);
 }
 
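+/* Size the vport ACL namespaces to the eswitch's total vport count; each
+ * direction is set up only when FW advertises ft_support for it.
+ */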
+static int mlx5_esw_acls_ns_init(struct mlx5_eswitch *esw)
+{
+       struct mlx5_core_dev *dev = esw->dev;
+       int total_vports;
+       int err;
+
+       total_vports = mlx5_eswitch_get_total_vports(dev);
+
+       if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) {
+               err = mlx5_fs_egress_acls_init(dev, total_vports);
+               if (err)
+                       return err;
+       } else {
+               esw_warn(dev, "egress ACL is not supported by FW\n");
+       }
+
+       if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) {
+               err = mlx5_fs_ingress_acls_init(dev, total_vports);
+               if (err)
+                       goto err;
+       } else {
+               esw_warn(dev, "ingress ACL is not supported by FW\n");
+       }
+       return 0;
+
+err:
+       if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support))
+               mlx5_fs_egress_acls_cleanup(dev);
+       return err;
+}
+
+static void mlx5_esw_acls_ns_cleanup(struct mlx5_eswitch *esw)
+{
+       struct mlx5_core_dev *dev = esw->dev;
+
+       if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support))
+               mlx5_fs_ingress_acls_cleanup(dev);
+       if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support))
+               mlx5_fs_egress_acls_cleanup(dev);
+}
+
 /**
  * mlx5_eswitch_enable_locked - Enable eswitch
  * @esw:       Pointer to eswitch
@@ -1661,14 +1436,12 @@ int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int mode, int num_vfs)
                return -EOPNOTSUPP;
        }
 
-       if (!MLX5_CAP_ESW_INGRESS_ACL(esw->dev, ft_support))
-               esw_warn(esw->dev, "ingress ACL is not supported by FW\n");
-
-       if (!MLX5_CAP_ESW_EGRESS_ACL(esw->dev, ft_support))
-               esw_warn(esw->dev, "engress ACL is not supported by FW\n");
-
        mlx5_eswitch_get_devlink_param(esw);
 
+       err = mlx5_esw_acls_ns_init(esw);
+       if (err)
+               return err;
+
        mlx5_eswitch_update_num_of_vfs(esw, num_vfs);
 
        esw_create_tsar(esw);
@@ -1704,6 +1477,7 @@ abort:
                mlx5_rescan_drivers(esw->dev);
 
        esw_destroy_tsar(esw);
+       mlx5_esw_acls_ns_cleanup(esw);
        return err;
 }
 
@@ -1719,7 +1493,7 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs)
 {
        int ret;
 
-       if (!ESW_ALLOWED(esw))
+       if (!mlx5_esw_allowed(esw))
                return 0;
 
        down_write(&esw->mode_lock);
@@ -1772,6 +1546,7 @@ void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw, bool clear_vf)
                mlx5_rescan_drivers(esw->dev);
 
        esw_destroy_tsar(esw);
+       mlx5_esw_acls_ns_cleanup(esw);
 
        if (clear_vf)
                mlx5_eswitch_clear_vf_vports_info(esw);
@@ -1779,7 +1554,7 @@ void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw, bool clear_vf)
 
 void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf)
 {
-       if (!ESW_ALLOWED(esw))
+       if (!mlx5_esw_allowed(esw))
                return;
 
        down_write(&esw->mode_lock);
@@ -1862,7 +1637,6 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
 abort:
        if (esw->work_queue)
                destroy_workqueue(esw->work_queue);
-       esw_offloads_cleanup_reps(esw);
        kfree(esw->vports);
        kfree(esw);
        return err;
@@ -1877,7 +1651,6 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
 
        esw->dev->priv.eswitch = NULL;
        destroy_workqueue(esw->work_queue);
-       esw_offloads_cleanup_reps(esw);
        mutex_destroy(&esw->state_lock);
        WARN_ON(!xa_empty(&esw->offloads.vhca_map));
        xa_destroy(&esw->offloads.vhca_map);
@@ -1885,6 +1658,7 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
        mlx5e_mod_hdr_tbl_destroy(&esw->offloads.mod_hdr);
        mutex_destroy(&esw->offloads.encap_tbl_lock);
        mutex_destroy(&esw->offloads.decap_tbl_lock);
+       esw_offloads_cleanup_reps(esw);
        kfree(esw->vports);
        kfree(esw);
 }
@@ -2030,7 +1804,7 @@ int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
        int other_vport = 1;
        int err = 0;
 
-       if (!ESW_ALLOWED(esw))
+       if (!mlx5_esw_allowed(esw))
                return -EPERM;
        if (IS_ERR(evport))
                return PTR_ERR(evport);
@@ -2112,205 +1886,6 @@ int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
        return err;
 }
 
-int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
-                               u16 vport, u16 vlan, u8 qos)
-{
-       u8 set_flags = 0;
-       int err = 0;
-
-       if (!ESW_ALLOWED(esw))
-               return -EPERM;
-
-       if (vlan || qos)
-               set_flags = SET_VLAN_STRIP | SET_VLAN_INSERT;
-
-       mutex_lock(&esw->state_lock);
-       if (esw->mode != MLX5_ESWITCH_LEGACY) {
-               if (!vlan)
-                       goto unlock; /* compatibility with libvirt */
-
-               err = -EOPNOTSUPP;
-               goto unlock;
-       }
-
-       err = __mlx5_eswitch_set_vport_vlan(esw, vport, vlan, qos, set_flags);
-
-unlock:
-       mutex_unlock(&esw->state_lock);
-       return err;
-}
-
-int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
-                                   u16 vport, bool spoofchk)
-{
-       struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
-       bool pschk;
-       int err = 0;
-
-       if (!ESW_ALLOWED(esw))
-               return -EPERM;
-       if (IS_ERR(evport))
-               return PTR_ERR(evport);
-
-       mutex_lock(&esw->state_lock);
-       if (esw->mode != MLX5_ESWITCH_LEGACY) {
-               err = -EOPNOTSUPP;
-               goto unlock;
-       }
-       pschk = evport->info.spoofchk;
-       evport->info.spoofchk = spoofchk;
-       if (pschk && !is_valid_ether_addr(evport->info.mac))
-               mlx5_core_warn(esw->dev,
-                              "Spoofchk in set while MAC is invalid, vport(%d)\n",
-                              evport->vport);
-       if (evport->enabled && esw->mode == MLX5_ESWITCH_LEGACY)
-               err = esw_acl_ingress_lgcy_setup(esw, evport);
-       if (err)
-               evport->info.spoofchk = pschk;
-
-unlock:
-       mutex_unlock(&esw->state_lock);
-       return err;
-}
-
-static void esw_cleanup_vepa_rules(struct mlx5_eswitch *esw)
-{
-       if (esw->fdb_table.legacy.vepa_uplink_rule)
-               mlx5_del_flow_rules(esw->fdb_table.legacy.vepa_uplink_rule);
-
-       if (esw->fdb_table.legacy.vepa_star_rule)
-               mlx5_del_flow_rules(esw->fdb_table.legacy.vepa_star_rule);
-
-       esw->fdb_table.legacy.vepa_uplink_rule = NULL;
-       esw->fdb_table.legacy.vepa_star_rule = NULL;
-}
-
-static int _mlx5_eswitch_set_vepa_locked(struct mlx5_eswitch *esw,
-                                        u8 setting)
-{
-       struct mlx5_flow_destination dest = {};
-       struct mlx5_flow_act flow_act = {};
-       struct mlx5_flow_handle *flow_rule;
-       struct mlx5_flow_spec *spec;
-       int err = 0;
-       void *misc;
-
-       if (!setting) {
-               esw_cleanup_vepa_rules(esw);
-               return 0;
-       }
-
-       if (esw->fdb_table.legacy.vepa_uplink_rule)
-               return 0;
-
-       spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
-       if (!spec)
-               return -ENOMEM;
-
-       /* Uplink rule forward uplink traffic to FDB */
-       misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
-       MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_UPLINK);
-
-       misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
-       MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
-
-       spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
-       dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
-       dest.ft = esw->fdb_table.legacy.fdb;
-       flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
-       flow_rule = mlx5_add_flow_rules(esw->fdb_table.legacy.vepa_fdb, spec,
-                                       &flow_act, &dest, 1);
-       if (IS_ERR(flow_rule)) {
-               err = PTR_ERR(flow_rule);
-               goto out;
-       } else {
-               esw->fdb_table.legacy.vepa_uplink_rule = flow_rule;
-       }
-
-       /* Star rule to forward all traffic to uplink vport */
-       memset(&dest, 0, sizeof(dest));
-       dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
-       dest.vport.num = MLX5_VPORT_UPLINK;
-       flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
-       flow_rule = mlx5_add_flow_rules(esw->fdb_table.legacy.vepa_fdb, NULL,
-                                       &flow_act, &dest, 1);
-       if (IS_ERR(flow_rule)) {
-               err = PTR_ERR(flow_rule);
-               goto out;
-       } else {
-               esw->fdb_table.legacy.vepa_star_rule = flow_rule;
-       }
-
-out:
-       kvfree(spec);
-       if (err)
-               esw_cleanup_vepa_rules(esw);
-       return err;
-}
-
-int mlx5_eswitch_set_vepa(struct mlx5_eswitch *esw, u8 setting)
-{
-       int err = 0;
-
-       if (!esw)
-               return -EOPNOTSUPP;
-
-       if (!ESW_ALLOWED(esw))
-               return -EPERM;
-
-       mutex_lock(&esw->state_lock);
-       if (esw->mode != MLX5_ESWITCH_LEGACY) {
-               err = -EOPNOTSUPP;
-               goto out;
-       }
-
-       err = _mlx5_eswitch_set_vepa_locked(esw, setting);
-
-out:
-       mutex_unlock(&esw->state_lock);
-       return err;
-}
-
-int mlx5_eswitch_get_vepa(struct mlx5_eswitch *esw, u8 *setting)
-{
-       if (!esw)
-               return -EOPNOTSUPP;
-
-       if (!ESW_ALLOWED(esw))
-               return -EPERM;
-
-       if (esw->mode != MLX5_ESWITCH_LEGACY)
-               return -EOPNOTSUPP;
-
-       *setting = esw->fdb_table.legacy.vepa_uplink_rule ? 1 : 0;
-       return 0;
-}
-
-int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw,
-                                u16 vport, bool setting)
-{
-       struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
-       int err = 0;
-
-       if (!ESW_ALLOWED(esw))
-               return -EPERM;
-       if (IS_ERR(evport))
-               return PTR_ERR(evport);
-
-       mutex_lock(&esw->state_lock);
-       if (esw->mode != MLX5_ESWITCH_LEGACY) {
-               err = -EOPNOTSUPP;
-               goto unlock;
-       }
-       evport->info.trusted = setting;
-       if (evport->enabled)
-               esw_vport_change_handle_locked(evport);
-
-unlock:
-       mutex_unlock(&esw->state_lock);
-       return err;
-}
-
 static u32 calculate_vports_min_rate_divider(struct mlx5_eswitch *esw)
 {
        u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
@@ -2376,7 +1951,7 @@ int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, u16 vport,
        bool max_rate_supported;
        int err = 0;
 
-       if (!ESW_ALLOWED(esw))
+       if (!mlx5_esw_allowed(esw))
                return -EPERM;
        if (IS_ERR(evport))
                return PTR_ERR(evport);
@@ -2415,50 +1990,6 @@ unlock:
        return err;
 }
 
-static int mlx5_eswitch_query_vport_drop_stats(struct mlx5_core_dev *dev,
-                                              struct mlx5_vport *vport,
-                                              struct mlx5_vport_drop_stats *stats)
-{
-       struct mlx5_eswitch *esw = dev->priv.eswitch;
-       u64 rx_discard_vport_down, tx_discard_vport_down;
-       u64 bytes = 0;
-       int err = 0;
-
-       if (esw->mode != MLX5_ESWITCH_LEGACY)
-               return 0;
-
-       mutex_lock(&esw->state_lock);
-       if (!vport->enabled)
-               goto unlock;
-
-       if (!IS_ERR_OR_NULL(vport->egress.legacy.drop_counter))
-               mlx5_fc_query(dev, vport->egress.legacy.drop_counter,
-                             &stats->rx_dropped, &bytes);
-
-       if (vport->ingress.legacy.drop_counter)
-               mlx5_fc_query(dev, vport->ingress.legacy.drop_counter,
-                             &stats->tx_dropped, &bytes);
-
-       if (!MLX5_CAP_GEN(dev, receive_discard_vport_down) &&
-           !MLX5_CAP_GEN(dev, transmit_discard_vport_down))
-               goto unlock;
-
-       err = mlx5_query_vport_down_stats(dev, vport->vport, 1,
-                                         &rx_discard_vport_down,
-                                         &tx_discard_vport_down);
-       if (err)
-               goto unlock;
-
-       if (MLX5_CAP_GEN(dev, receive_discard_vport_down))
-               stats->rx_dropped += rx_discard_vport_down;
-       if (MLX5_CAP_GEN(dev, transmit_discard_vport_down))
-               stats->tx_dropped += tx_discard_vport_down;
-
-unlock:
-       mutex_unlock(&esw->state_lock);
-       return err;
-}
-
 int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
                                 u16 vport_num,
                                 struct ifla_vf_stats *vf_stats)
@@ -2526,7 +2057,7 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
        vf_stats->broadcast =
                MLX5_GET_CTR(out, received_eth_broadcast.packets);
 
-       err = mlx5_eswitch_query_vport_drop_stats(esw->dev, vport, &stats);
+       err = mlx5_esw_query_vport_drop_stats(esw->dev, vport, &stats);
        if (err)
                goto free_out;
        vf_stats->rx_dropped = stats.rx_dropped;
@@ -2541,7 +2072,7 @@ u8 mlx5_eswitch_mode(struct mlx5_core_dev *dev)
 {
        struct mlx5_eswitch *esw = dev->priv.eswitch;
 
-       return ESW_ALLOWED(esw) ? esw->mode : MLX5_ESWITCH_NONE;
+       return mlx5_esw_allowed(esw) ? esw->mode : MLX5_ESWITCH_NONE;
 }
 EXPORT_SYMBOL_GPL(mlx5_eswitch_mode);
 
@@ -2551,7 +2082,7 @@ mlx5_eswitch_get_encap_mode(const struct mlx5_core_dev *dev)
        struct mlx5_eswitch *esw;
 
        esw = dev->priv.eswitch;
-       return ESW_ALLOWED(esw) ? esw->offloads.encap :
+       return mlx5_esw_allowed(esw) ? esw->offloads.encap :
                DEVLINK_ESWITCH_ENCAP_MODE_NONE;
 }
 EXPORT_SYMBOL(mlx5_eswitch_get_encap_mode);
@@ -2597,7 +2128,7 @@ bool mlx5_esw_hold(struct mlx5_core_dev *mdev)
        struct mlx5_eswitch *esw = mdev->priv.eswitch;
 
        /* e.g. VF doesn't have eswitch so nothing to do */
-       if (!ESW_ALLOWED(esw))
+       if (!mlx5_esw_allowed(esw))
                return true;
 
        if (down_read_trylock(&esw->mode_lock) != 0)
@@ -2614,7 +2145,7 @@ void mlx5_esw_release(struct mlx5_core_dev *mdev)
 {
        struct mlx5_eswitch *esw = mdev->priv.eswitch;
 
-       if (ESW_ALLOWED(esw))
+       if (mlx5_esw_allowed(esw))
                up_read(&esw->mode_lock);
 }
 
@@ -2626,7 +2157,7 @@ void mlx5_esw_get(struct mlx5_core_dev *mdev)
 {
        struct mlx5_eswitch *esw = mdev->priv.eswitch;
 
-       if (ESW_ALLOWED(esw))
+       if (mlx5_esw_allowed(esw))
                atomic64_inc(&esw->user_count);
 }
 
@@ -2638,7 +2169,7 @@ void mlx5_esw_put(struct mlx5_core_dev *mdev)
 {
        struct mlx5_eswitch *esw = mdev->priv.eswitch;
 
-       if (ESW_ALLOWED(esw))
+       if (mlx5_esw_allowed(esw))
                atomic64_dec_if_positive(&esw->user_count);
 }
 
index deafb0e..b289d75 100644 (file)
@@ -152,7 +152,6 @@ enum mlx5_eswitch_vport_event {
 
 struct mlx5_vport {
        struct mlx5_core_dev    *dev;
-       int                     vport;
        struct hlist_head       uc_list[MLX5_L2_ADDR_HASH_SIZE];
        struct hlist_head       mc_list[MLX5_L2_ADDR_HASH_SIZE];
        struct mlx5_flow_handle *promisc_rule;
@@ -174,6 +173,7 @@ struct mlx5_vport {
                u32 max_rate;
        } qos;
 
+       u16 vport;
        bool                    enabled;
        enum mlx5_eswitch_vport_event enabled_events;
        struct devlink_port *dl_port;
@@ -314,6 +314,8 @@ int esw_offloads_enable(struct mlx5_eswitch *esw);
 void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw);
 int esw_offloads_init_reps(struct mlx5_eswitch *esw);
 
+bool mlx5_esw_vport_match_metadata_supported(const struct mlx5_eswitch *esw);
+int mlx5_esw_offloads_vport_metadata_set(struct mlx5_eswitch *esw, bool enable);
 u32 mlx5_esw_match_metadata_alloc(struct mlx5_eswitch *esw);
 void mlx5_esw_match_metadata_free(struct mlx5_eswitch *esw, u32 metadata);
 
@@ -519,6 +521,11 @@ const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev);
 #define esw_debug(dev, format, ...)                            \
        mlx5_core_dbg_mask(dev, MLX5_DEBUG_ESWITCH_MASK, format, ##__VA_ARGS__)
 
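+/* Replaces the old ESW_ALLOWED() macro: true when the eswitch exists and
+ * this function is the E-Switch manager.
+ */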
+static inline bool mlx5_esw_allowed(const struct mlx5_eswitch *esw)
+{
+       return esw && MLX5_ESWITCH_MANAGER(esw->dev);
+}
+
 /* The returned number is valid only when the dev is eswitch manager. */
 static inline u16 mlx5_eswitch_manager_vport(struct mlx5_core_dev *dev)
 {
@@ -807,6 +814,8 @@ void mlx5_esw_put(struct mlx5_core_dev *dev);
 int mlx5_esw_try_lock(struct mlx5_eswitch *esw);
 void mlx5_esw_unlock(struct mlx5_eswitch *esw);
 
+void esw_vport_change_handle_locked(struct mlx5_vport *vport);
+
 #else  /* CONFIG_MLX5_ESWITCH */
 /* eswitch API stubs */
 static inline int  mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
index ab32f68..bbb7071 100644 (file)
@@ -986,12 +986,13 @@ static void mlx5_eswitch_del_send_to_vport_meta_rules(struct mlx5_eswitch *esw)
 static int
 mlx5_eswitch_add_send_to_vport_meta_rules(struct mlx5_eswitch *esw)
 {
-       int num_vfs, vport_num, rule_idx = 0, err = 0;
        struct mlx5_flow_destination dest = {};
        struct mlx5_flow_act flow_act = {0};
+       int num_vfs, rule_idx = 0, err = 0;
        struct mlx5_flow_handle *flow_rule;
        struct mlx5_flow_handle **flows;
        struct mlx5_flow_spec *spec;
+       u16 vport_num;
 
        num_vfs = esw->esw_funcs.num_vfs;
        flows = kvzalloc(num_vfs * sizeof(*flows), GFP_KERNEL);
@@ -2351,8 +2352,7 @@ static void esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
        mlx5_devcom_unregister_component(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
 }
 
-static bool
-esw_check_vport_match_metadata_supported(const struct mlx5_eswitch *esw)
+bool mlx5_esw_vport_match_metadata_supported(const struct mlx5_eswitch *esw)
 {
        if (!MLX5_CAP_ESW(esw->dev, esw_uplink_ingress_acl))
                return false;
@@ -2452,6 +2452,28 @@ metadata_err:
        return err;
 }
 
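+/* Devlink-driven toggle for vport match metadata; only permitted while the
+ * eswitch is disabled (MLX5_ESWITCH_NONE) and the HW supports it.
+ */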
+int mlx5_esw_offloads_vport_metadata_set(struct mlx5_eswitch *esw, bool enable)
+{
+       int err = 0;
+
+       down_write(&esw->mode_lock);
+       if (esw->mode != MLX5_ESWITCH_NONE) {
+               err = -EBUSY;
+               goto done;
+       }
+       if (!mlx5_esw_vport_match_metadata_supported(esw)) {
+               err = -EOPNOTSUPP;
+               goto done;
+       }
+       if (enable)
+               esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;
+       else
+               esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
+done:
+       up_write(&esw->mode_lock);
+       return err;
+}
+
 int
 esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw,
                                     struct mlx5_vport *vport)
@@ -2673,9 +2695,6 @@ int esw_offloads_enable(struct mlx5_eswitch *esw)
        if (err)
                goto err_metadata;
 
-       if (esw_check_vport_match_metadata_supported(esw))
-               esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;
-
        err = esw_offloads_metadata_init(esw);
        if (err)
                goto err_metadata;
@@ -2725,7 +2744,6 @@ err_pool:
 err_vport_metadata:
        esw_offloads_metadata_uninit(esw);
 err_metadata:
-       esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
        mlx5_rdma_disable_roce(esw->dev);
        mutex_destroy(&esw->offloads.termtbl_mutex);
        return err;
@@ -2761,7 +2779,6 @@ void esw_offloads_disable(struct mlx5_eswitch *esw)
        esw_offloads_steering_cleanup(esw);
        mapping_destroy(esw->offloads.reg_c0_obj_pool);
        esw_offloads_metadata_uninit(esw);
-       esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
        mlx5_rdma_disable_roce(esw->dev);
        mutex_destroy(&esw->offloads.termtbl_mutex);
        esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
index d43a05e..0bba92c 100644 (file)
@@ -850,7 +850,7 @@ mlx5_fpga_ipsec_release_sa_ctx(struct mlx5_fpga_ipsec_sa_ctx *sa_ctx)
                return;
        }
 
-       if (sa_ctx->fpga_xfrm->accel_xfrm.attrs.action &
+       if (sa_ctx->fpga_xfrm->accel_xfrm.attrs.action ==
            MLX5_ACCEL_ESP_ACTION_DECRYPT)
                ida_free(&fipsec->halloc, sa_ctx->sa_handle);
 
@@ -1085,6 +1085,7 @@ static int fpga_ipsec_fs_create_fte(struct mlx5_flow_root_namespace *ns,
        rule->ctx = mlx5_fpga_ipsec_fs_create_sa_ctx(dev, fte, is_egress);
        if (IS_ERR(rule->ctx)) {
                int err = PTR_ERR(rule->ctx);
+
                kfree(rule);
                return err;
        }
index 0216bd6..f74d2c8 100644 (file)
@@ -2229,17 +2229,21 @@ struct mlx5_flow_namespace *mlx5_get_flow_vport_acl_namespace(struct mlx5_core_d
 {
        struct mlx5_flow_steering *steering = dev->priv.steering;
 
-       if (!steering || vport >= mlx5_eswitch_get_total_vports(dev))
+       if (!steering)
                return NULL;
 
        switch (type) {
        case MLX5_FLOW_NAMESPACE_ESW_EGRESS:
+               if (vport >= steering->esw_egress_acl_vports)
+                       return NULL;
                if (steering->esw_egress_root_ns &&
                    steering->esw_egress_root_ns[vport])
                        return &steering->esw_egress_root_ns[vport]->ns;
                else
                        return NULL;
        case MLX5_FLOW_NAMESPACE_ESW_INGRESS:
+               if (vport >= steering->esw_ingress_acl_vports)
+                       return NULL;
                if (steering->esw_ingress_root_ns &&
                    steering->esw_ingress_root_ns[vport])
                        return &steering->esw_ingress_root_ns[vport]->ns;
@@ -2571,43 +2575,11 @@ static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns)
        clean_tree(&root_ns->ns.node);
 }
 
-static void cleanup_egress_acls_root_ns(struct mlx5_core_dev *dev)
-{
-       struct mlx5_flow_steering *steering = dev->priv.steering;
-       int i;
-
-       if (!steering->esw_egress_root_ns)
-               return;
-
-       for (i = 0; i < mlx5_eswitch_get_total_vports(dev); i++)
-               cleanup_root_ns(steering->esw_egress_root_ns[i]);
-
-       kfree(steering->esw_egress_root_ns);
-       steering->esw_egress_root_ns = NULL;
-}
-
-static void cleanup_ingress_acls_root_ns(struct mlx5_core_dev *dev)
-{
-       struct mlx5_flow_steering *steering = dev->priv.steering;
-       int i;
-
-       if (!steering->esw_ingress_root_ns)
-               return;
-
-       for (i = 0; i < mlx5_eswitch_get_total_vports(dev); i++)
-               cleanup_root_ns(steering->esw_ingress_root_ns[i]);
-
-       kfree(steering->esw_ingress_root_ns);
-       steering->esw_ingress_root_ns = NULL;
-}
-
 void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
 {
        struct mlx5_flow_steering *steering = dev->priv.steering;
 
        cleanup_root_ns(steering->root_ns);
-       cleanup_egress_acls_root_ns(dev);
-       cleanup_ingress_acls_root_ns(dev);
        cleanup_root_ns(steering->fdb_root_ns);
        steering->fdb_root_ns = NULL;
        kfree(steering->fdb_sub_ns);
@@ -2852,10 +2824,9 @@ static int init_ingress_acl_root_ns(struct mlx5_flow_steering *steering, int vpo
        return PTR_ERR_OR_ZERO(prio);
 }
 
-static int init_egress_acls_root_ns(struct mlx5_core_dev *dev)
+int mlx5_fs_egress_acls_init(struct mlx5_core_dev *dev, int total_vports)
 {
        struct mlx5_flow_steering *steering = dev->priv.steering;
-       int total_vports = mlx5_eswitch_get_total_vports(dev);
        int err;
        int i;
 
@@ -2871,7 +2842,7 @@ static int init_egress_acls_root_ns(struct mlx5_core_dev *dev)
                if (err)
                        goto cleanup_root_ns;
        }
-
+       steering->esw_egress_acl_vports = total_vports;
        return 0;
 
 cleanup_root_ns:
@@ -2882,10 +2853,24 @@ cleanup_root_ns:
        return err;
 }
 
-static int init_ingress_acls_root_ns(struct mlx5_core_dev *dev)
+void mlx5_fs_egress_acls_cleanup(struct mlx5_core_dev *dev)
+{
+       struct mlx5_flow_steering *steering = dev->priv.steering;
+       int i;
+
+       if (!steering->esw_egress_root_ns)
+               return;
+
+       for (i = 0; i < steering->esw_egress_acl_vports; i++)
+               cleanup_root_ns(steering->esw_egress_root_ns[i]);
+
+       kfree(steering->esw_egress_root_ns);
+       steering->esw_egress_root_ns = NULL;
+}
+
+int mlx5_fs_ingress_acls_init(struct mlx5_core_dev *dev, int total_vports)
 {
        struct mlx5_flow_steering *steering = dev->priv.steering;
-       int total_vports = mlx5_eswitch_get_total_vports(dev);
        int err;
        int i;
 
@@ -2901,7 +2886,7 @@ static int init_ingress_acls_root_ns(struct mlx5_core_dev *dev)
                if (err)
                        goto cleanup_root_ns;
        }
-
+       steering->esw_ingress_acl_vports = total_vports;
        return 0;
 
 cleanup_root_ns:
@@ -2912,6 +2897,21 @@ cleanup_root_ns:
        return err;
 }
 
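+/* Counterpart of mlx5_fs_ingress_acls_init(); walks only the vports that
+ * were actually initialized (esw_ingress_acl_vports).
+ */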
+void mlx5_fs_ingress_acls_cleanup(struct mlx5_core_dev *dev)
+{
+       struct mlx5_flow_steering *steering = dev->priv.steering;
+       int i;
+
+       if (!steering->esw_ingress_root_ns)
+               return;
+
+       for (i = 0; i < steering->esw_ingress_acl_vports; i++)
+               cleanup_root_ns(steering->esw_ingress_root_ns[i]);
+
+       kfree(steering->esw_ingress_root_ns);
+       steering->esw_ingress_root_ns = NULL;
+}
+
 static int init_egress_root_ns(struct mlx5_flow_steering *steering)
 {
        int err;
@@ -2974,16 +2974,6 @@ int mlx5_init_fs(struct mlx5_core_dev *dev)
                        if (err)
                                goto err;
                }
-               if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) {
-                       err = init_egress_acls_root_ns(dev);
-                       if (err)
-                               goto err;
-               }
-               if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) {
-                       err = init_ingress_acls_root_ns(dev);
-                       if (err)
-                               goto err;
-               }
        }
 
        if (MLX5_CAP_FLOWTABLE_SNIFFER_RX(dev, ft_support)) {
index b24a984..e577a2c 100644 (file)
@@ -129,6 +129,8 @@ struct mlx5_flow_steering {
        struct mlx5_flow_root_namespace *rdma_rx_root_ns;
        struct mlx5_flow_root_namespace *rdma_tx_root_ns;
        struct mlx5_flow_root_namespace *egress_root_ns;
+       int esw_egress_acl_vports;
+       int esw_ingress_acl_vports;
 };
 
 struct fs_node {
@@ -287,6 +289,11 @@ int mlx5_flow_namespace_set_mode(struct mlx5_flow_namespace *ns,
 int mlx5_init_fs(struct mlx5_core_dev *dev);
 void mlx5_cleanup_fs(struct mlx5_core_dev *dev);
 
+int mlx5_fs_egress_acls_init(struct mlx5_core_dev *dev, int total_vports);
+void mlx5_fs_egress_acls_cleanup(struct mlx5_core_dev *dev);
+int mlx5_fs_ingress_acls_init(struct mlx5_core_dev *dev, int total_vports);
+void mlx5_fs_ingress_acls_cleanup(struct mlx5_core_dev *dev);
+
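
The four declarations above replace the static init/cleanup pair that used to live inside mlx5_init_fs()/mlx5_cleanup_fs(): the eswitch side now decides when the ACL namespaces exist and how many vports they cover, while fs_core merely records the count for cleanup. A minimal sketch of how a caller might drive the new API, assuming a hypothetical esw_acls_init() wrapper (the real call sites are elsewhere in this series):

    /* Sketch only: a plausible eswitch-side caller for the new hooks.
     * esw_acls_init() is a hypothetical name; the mlx5_fs_*_acls_*
     * symbols are the ones exported by this patch.
     */
    static int esw_acls_init(struct mlx5_core_dev *dev)
    {
            int total_vports = mlx5_eswitch_get_total_vports(dev);
            int err;

            if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) {
                    err = mlx5_fs_egress_acls_init(dev, total_vports);
                    if (err)
                            return err;
            }

            if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) {
                    err = mlx5_fs_ingress_acls_init(dev, total_vports);
                    if (err) {
                            /* Cleanup is a no-op if egress was never set up */
                            mlx5_fs_egress_acls_cleanup(dev);
                            return err;
                    }
            }

            return 0;
    }
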
 #define fs_get_obj(v, _node)  {v = container_of((_node), typeof(*v), node); }
 
 #define fs_list_for_each_entry(pos, root)              \
index 127bb92..b874839 100644 (file)
@@ -603,8 +603,6 @@ void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev)
        if (err)
                mlx5_core_err(dev, "Failed to init multipath lag err=%d\n",
                              err);
-
-       return;
 }
 
 /* Must be called with intf_mutex held */
index 19e3e97..1f907df 100644 (file)
@@ -167,7 +167,6 @@ static void irq_set_name(char *name, int vecidx)
 
        snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d",
                 vecidx - MLX5_IRQ_VEC_COMP_BASE);
-       return;
 }
 
 static int request_irqs(struct mlx5_core_dev *dev, int nvec)
index 8e0dddc..441b545 100644 (file)
@@ -180,5 +180,4 @@ del_roce_addr:
        mlx5_rdma_del_roce_addr(dev);
 disable_roce:
        mlx5_nic_vport_disable_roce(dev);
-       return;
 }
index 60a6328..52226d9 100644 (file)
@@ -270,15 +270,14 @@ static int mlx5_sf_add(struct mlx5_core_dev *dev, struct mlx5_sf_table *table,
 {
        struct mlx5_eswitch *esw = dev->priv.eswitch;
        struct mlx5_sf *sf;
-       u16 hw_fn_id;
        int err;
 
        sf = mlx5_sf_alloc(table, new_attr->sfnum, extack);
        if (IS_ERR(sf))
                return PTR_ERR(sf);
 
-       hw_fn_id = mlx5_sf_sw_to_hw_id(dev, sf->id);
-       err = mlx5_esw_offloads_sf_vport_enable(esw, &sf->dl_port, hw_fn_id, new_attr->sfnum);
+       err = mlx5_esw_offloads_sf_vport_enable(esw, &sf->dl_port, sf->hw_fn_id,
+                                               new_attr->sfnum);
        if (err)
                goto esw_err;
        *new_port_index = sf->port_index;
index c9bddde..ec53c11 100644 (file)
@@ -67,8 +67,8 @@ int mlx5_sf_hw_table_sf_alloc(struct mlx5_core_dev *dev, u32 usr_sfnum)
                goto exist_err;
        }
 
-       hw_fn_id = mlx5_sf_sw_to_hw_id(table->dev, sw_id);
-       err = mlx5_cmd_alloc_sf(table->dev, hw_fn_id);
+       hw_fn_id = mlx5_sf_sw_to_hw_id(dev, sw_id);
+       err = mlx5_cmd_alloc_sf(dev, hw_fn_id);
        if (err)
                goto err;
 
@@ -80,7 +80,7 @@ int mlx5_sf_hw_table_sf_alloc(struct mlx5_core_dev *dev, u32 usr_sfnum)
        return sw_id;
 
 vhca_err:
-       mlx5_cmd_dealloc_sf(table->dev, hw_fn_id);
+       mlx5_cmd_dealloc_sf(dev, hw_fn_id);
 err:
        table->sfs[i].allocated = false;
 exist_err:
@@ -93,8 +93,8 @@ static void _mlx5_sf_hw_id_free(struct mlx5_core_dev *dev, u16 id)
        struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table;
        u16 hw_fn_id;
 
-       hw_fn_id = mlx5_sf_sw_to_hw_id(table->dev, id);
-       mlx5_cmd_dealloc_sf(table->dev, hw_fn_id);
+       hw_fn_id = mlx5_sf_sw_to_hw_id(dev, id);
+       mlx5_cmd_dealloc_sf(dev, hw_fn_id);
        table->sfs[id].allocated = false;
        table->sfs[id].pending_delete = false;
 }
@@ -123,7 +123,7 @@ void mlx5_sf_hw_table_sf_deferred_free(struct mlx5_core_dev *dev, u16 id)
                goto err;
        state = MLX5_GET(query_vhca_state_out, out, vhca_state_context.vhca_state);
        if (state == MLX5_VHCA_STATE_ALLOCATED) {
-               mlx5_cmd_dealloc_sf(table->dev, hw_fn_id);
+               mlx5_cmd_dealloc_sf(dev, hw_fn_id);
                table->sfs[id].allocated = false;
        } else {
                table->sfs[id].pending_delete = true;
@@ -216,7 +216,7 @@ int mlx5_sf_hw_table_create(struct mlx5_core_dev *dev)
                return 0;
 
        table->vhca_nb.notifier_call = mlx5_sf_hw_vhca_event;
-       return mlx5_vhca_event_notifier_register(table->dev, &table->vhca_nb);
+       return mlx5_vhca_event_notifier_register(dev, &table->vhca_nb);
 }
 
 void mlx5_sf_hw_table_destroy(struct mlx5_core_dev *dev)
@@ -226,7 +226,7 @@ void mlx5_sf_hw_table_destroy(struct mlx5_core_dev *dev)
        if (!table)
                return;
 
-       mlx5_vhca_event_notifier_unregister(table->dev, &table->vhca_nb);
+       mlx5_vhca_event_notifier_unregister(dev, &table->vhca_nb);
        /* Dealloc SFs whose firmware event has been missed. */
        mlx5_sf_hw_dealloc_all(table);
 }
index 28a7971..949879c 100644 (file)
@@ -313,8 +313,8 @@ static int dr_action_handle_cs_recalc(struct mlx5dr_domain *dmn,
                 * table, since there is an *assumption* that in such case FW
                 * will recalculate the CS.
                 */
-               if (dest_action->dest_tbl.is_fw_tbl) {
-                       *final_icm_addr = dest_action->dest_tbl.fw_tbl.rx_icm_addr;
+               if (dest_action->dest_tbl->is_fw_tbl) {
+                       *final_icm_addr = dest_action->dest_tbl->fw_tbl.rx_icm_addr;
                } else {
                        mlx5dr_dbg(dmn,
                                   "Destination FT should be terminating when modify TTL is used\n");
@@ -326,8 +326,8 @@ static int dr_action_handle_cs_recalc(struct mlx5dr_domain *dmn,
                /* If destination is vport we will get the FW flow table
                 * that recalculates the CS and forwards to the vport.
                 */
-               ret = mlx5dr_domain_cache_get_recalc_cs_ft_addr(dest_action->vport.dmn,
-                                                               dest_action->vport.caps->num,
+               ret = mlx5dr_domain_cache_get_recalc_cs_ft_addr(dest_action->vport->dmn,
+                                                               dest_action->vport->caps->num,
                                                                final_icm_addr);
                if (ret) {
                        mlx5dr_err(dmn, "Failed to get FW cs recalc flow table\n");
@@ -369,6 +369,7 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
        action_domain = dr_action_get_action_domain(dmn->type, nic_dmn->ste_type);
 
        for (i = 0; i < num_actions; i++) {
+               struct mlx5dr_action_dest_tbl *dest_tbl;
                struct mlx5dr_action *action;
                int max_actions_type = 1;
                u32 action_type;
@@ -382,37 +383,38 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
                        break;
                case DR_ACTION_TYP_FT:
                        dest_action = action;
-                       if (!action->dest_tbl.is_fw_tbl) {
-                               if (action->dest_tbl.tbl->dmn != dmn) {
+                       dest_tbl = action->dest_tbl;
+                       if (!dest_tbl->is_fw_tbl) {
+                               if (dest_tbl->tbl->dmn != dmn) {
                                        mlx5dr_err(dmn,
                                                   "Destination table belongs to a different domain\n");
                                        goto out_invalid_arg;
                                }
-                               if (action->dest_tbl.tbl->level <= matcher->tbl->level) {
+                               if (dest_tbl->tbl->level <= matcher->tbl->level) {
                                        mlx5_core_warn_once(dmn->mdev,
                                                            "Connecting table to a lower/same level destination table\n");
                                        mlx5dr_dbg(dmn,
                                                   "Connecting table at level %d to a destination table at level %d\n",
                                                   matcher->tbl->level,
-                                                  action->dest_tbl.tbl->level);
+                                                  dest_tbl->tbl->level);
                                }
                                attr.final_icm_addr = rx_rule ?
-                                       action->dest_tbl.tbl->rx.s_anchor->chunk->icm_addr :
-                                       action->dest_tbl.tbl->tx.s_anchor->chunk->icm_addr;
+                                       dest_tbl->tbl->rx.s_anchor->chunk->icm_addr :
+                                       dest_tbl->tbl->tx.s_anchor->chunk->icm_addr;
                        } else {
                                struct mlx5dr_cmd_query_flow_table_details output;
                                int ret;
 
                                /* get the relevant addresses */
-                               if (!action->dest_tbl.fw_tbl.rx_icm_addr) {
+                               if (!action->dest_tbl->fw_tbl.rx_icm_addr) {
                                        ret = mlx5dr_cmd_query_flow_table(dmn->mdev,
-                                                                         action->dest_tbl.fw_tbl.type,
-                                                                         action->dest_tbl.fw_tbl.id,
+                                                                         dest_tbl->fw_tbl.type,
+                                                                         dest_tbl->fw_tbl.id,
                                                                          &output);
                                        if (!ret) {
-                                               action->dest_tbl.fw_tbl.tx_icm_addr =
+                                               dest_tbl->fw_tbl.tx_icm_addr =
                                                        output.sw_owner_icm_root_1;
-                                               action->dest_tbl.fw_tbl.rx_icm_addr =
+                                               dest_tbl->fw_tbl.rx_icm_addr =
                                                        output.sw_owner_icm_root_0;
                                        } else {
                                                mlx5dr_err(dmn,
@@ -422,50 +424,50 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
                                        }
                                }
                                attr.final_icm_addr = rx_rule ?
-                                       action->dest_tbl.fw_tbl.rx_icm_addr :
-                                       action->dest_tbl.fw_tbl.tx_icm_addr;
+                                       dest_tbl->fw_tbl.rx_icm_addr :
+                                       dest_tbl->fw_tbl.tx_icm_addr;
                        }
                        break;
                case DR_ACTION_TYP_QP:
                        mlx5dr_info(dmn, "Domain doesn't support QP\n");
                        goto out_invalid_arg;
                case DR_ACTION_TYP_CTR:
-                       attr.ctr_id = action->ctr.ctr_id +
-                               action->ctr.offeset;
+                       attr.ctr_id = action->ctr->ctr_id +
+                               action->ctr->offeset;
                        break;
                case DR_ACTION_TYP_TAG:
-                       attr.flow_tag = action->flow_tag;
+                       attr.flow_tag = action->flow_tag->flow_tag;
                        break;
                case DR_ACTION_TYP_TNL_L2_TO_L2:
                        break;
                case DR_ACTION_TYP_TNL_L3_TO_L2:
-                       attr.decap_index = action->rewrite.index;
-                       attr.decap_actions = action->rewrite.num_of_actions;
+                       attr.decap_index = action->rewrite->index;
+                       attr.decap_actions = action->rewrite->num_of_actions;
                        attr.decap_with_vlan =
                                attr.decap_actions == WITH_VLAN_NUM_HW_ACTIONS;
                        break;
                case DR_ACTION_TYP_MODIFY_HDR:
-                       attr.modify_index = action->rewrite.index;
-                       attr.modify_actions = action->rewrite.num_of_actions;
-                       recalc_cs_required = action->rewrite.modify_ttl &&
+                       attr.modify_index = action->rewrite->index;
+                       attr.modify_actions = action->rewrite->num_of_actions;
+                       recalc_cs_required = action->rewrite->modify_ttl &&
                                             !mlx5dr_ste_supp_ttl_cs_recalc(&dmn->info.caps);
                        break;
                case DR_ACTION_TYP_L2_TO_TNL_L2:
                case DR_ACTION_TYP_L2_TO_TNL_L3:
-                       attr.reformat_size = action->reformat.reformat_size;
-                       attr.reformat_id = action->reformat.reformat_id;
+                       attr.reformat_size = action->reformat->reformat_size;
+                       attr.reformat_id = action->reformat->reformat_id;
                        break;
                case DR_ACTION_TYP_VPORT:
-                       attr.hit_gvmi = action->vport.caps->vhca_gvmi;
+                       attr.hit_gvmi = action->vport->caps->vhca_gvmi;
                        dest_action = action;
                        if (rx_rule) {
                                /* Loopback on WIRE vport is not supported */
-                               if (action->vport.caps->num == WIRE_PORT)
+                               if (action->vport->caps->num == WIRE_PORT)
                                        goto out_invalid_arg;
 
-                               attr.final_icm_addr = action->vport.caps->icm_address_rx;
+                               attr.final_icm_addr = action->vport->caps->icm_address_rx;
                        } else {
-                               attr.final_icm_addr = action->vport.caps->icm_address_tx;
+                               attr.final_icm_addr = action->vport->caps->icm_address_tx;
                        }
                        break;
                case DR_ACTION_TYP_POP_VLAN:
@@ -477,7 +479,7 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
                        if (attr.vlans.count == MLX5DR_MAX_VLANS)
                                return -EINVAL;
 
-                       attr.vlans.headers[attr.vlans.count++] = action->push_vlan.vlan_hdr;
+                       attr.vlans.headers[attr.vlans.count++] = action->push_vlan->vlan_hdr;
                        break;
                default:
                        goto out_invalid_arg;
@@ -530,17 +532,37 @@ out_invalid_arg:
        return -EINVAL;
 }
 
+static const unsigned int action_size[DR_ACTION_TYP_MAX] = {
+       [DR_ACTION_TYP_TNL_L2_TO_L2] = sizeof(struct mlx5dr_action_reformat),
+       [DR_ACTION_TYP_L2_TO_TNL_L2] = sizeof(struct mlx5dr_action_reformat),
+       [DR_ACTION_TYP_TNL_L3_TO_L2] = sizeof(struct mlx5dr_action_rewrite),
+       [DR_ACTION_TYP_L2_TO_TNL_L3] = sizeof(struct mlx5dr_action_reformat),
+       [DR_ACTION_TYP_FT]           = sizeof(struct mlx5dr_action_dest_tbl),
+       [DR_ACTION_TYP_CTR]          = sizeof(struct mlx5dr_action_ctr),
+       [DR_ACTION_TYP_TAG]          = sizeof(struct mlx5dr_action_flow_tag),
+       [DR_ACTION_TYP_MODIFY_HDR]   = sizeof(struct mlx5dr_action_rewrite),
+       [DR_ACTION_TYP_VPORT]        = sizeof(struct mlx5dr_action_vport),
+       [DR_ACTION_TYP_PUSH_VLAN]    = sizeof(struct mlx5dr_action_push_vlan),
+};
+
 static struct mlx5dr_action *
 dr_action_create_generic(enum mlx5dr_action_type action_type)
 {
        struct mlx5dr_action *action;
+       int extra_size;
+
+       if (action_type >= DR_ACTION_TYP_MAX)
+               return NULL;
+
+       extra_size = action_size[action_type];
 
-       action = kzalloc(sizeof(*action), GFP_KERNEL);
+       action = kzalloc(sizeof(*action) + extra_size, GFP_KERNEL);
        if (!action)
                return NULL;
 
        action->action_type = action_type;
        refcount_set(&action->refcount, 1);
+       action->data = action + 1;
 
        return action;
 }
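
dr_action_create_generic() now sizes a single allocation for the action header plus the per-type payload from action_size[], and points the union at the trailing bytes. The same idiom in isolation, with purely illustrative names:

    /* Illustrative only: header plus variable payload in one kzalloc()
     * (from <linux/slab.h>), payload starting immediately after the
     * header, so a single kfree() releases both.
     */
    struct demo_hdr {
            int type;
            void *data;     /* points just past the header */
    };

    static struct demo_hdr *demo_alloc(size_t payload_size)
    {
            struct demo_hdr *h;

            h = kzalloc(sizeof(*h) + payload_size, GFP_KERNEL);
            if (!h)
                    return NULL;

            h->data = h + 1;        /* first byte after struct demo_hdr */
            return h;
    }

This relies on the header's alignment being sufficient for the payload, which holds here since both the action header and every per-type struct are pointer-aligned.
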
@@ -559,10 +581,10 @@ mlx5dr_action_create_dest_table_num(struct mlx5dr_domain *dmn, u32 table_num)
        if (!action)
                return NULL;
 
-       action->dest_tbl.is_fw_tbl = true;
-       action->dest_tbl.fw_tbl.dmn = dmn;
-       action->dest_tbl.fw_tbl.id = table_num;
-       action->dest_tbl.fw_tbl.type = FS_FT_FDB;
+       action->dest_tbl->is_fw_tbl = true;
+       action->dest_tbl->fw_tbl.dmn = dmn;
+       action->dest_tbl->fw_tbl.id = table_num;
+       action->dest_tbl->fw_tbl.type = FS_FT_FDB;
        refcount_inc(&dmn->refcount);
 
        return action;
@@ -579,7 +601,7 @@ mlx5dr_action_create_dest_table(struct mlx5dr_table *tbl)
        if (!action)
                goto dec_ref;
 
-       action->dest_tbl.tbl = tbl;
+       action->dest_tbl->tbl = tbl;
 
        return action;
 
@@ -624,12 +646,12 @@ mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
                case DR_ACTION_TYP_VPORT:
                        hw_dests[i].vport.flags = MLX5_FLOW_DEST_VPORT_VHCA_ID;
                        hw_dests[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
-                       hw_dests[i].vport.num = dest_action->vport.caps->num;
-                       hw_dests[i].vport.vhca_id = dest_action->vport.caps->vhca_gvmi;
+                       hw_dests[i].vport.num = dest_action->vport->caps->num;
+                       hw_dests[i].vport.vhca_id = dest_action->vport->caps->vhca_gvmi;
                        if (reformat_action) {
                                reformat_req = true;
                                hw_dests[i].vport.reformat_id =
-                                       reformat_action->reformat.reformat_id;
+                                       reformat_action->reformat->reformat_id;
                                ref_actions[num_of_ref++] = reformat_action;
                                hw_dests[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
                        }
@@ -637,10 +659,10 @@ mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
 
                case DR_ACTION_TYP_FT:
                        hw_dests[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
-                       if (dest_action->dest_tbl.is_fw_tbl)
-                               hw_dests[i].ft_id = dest_action->dest_tbl.fw_tbl.id;
+                       if (dest_action->dest_tbl->is_fw_tbl)
+                               hw_dests[i].ft_id = dest_action->dest_tbl->fw_tbl.id;
                        else
-                               hw_dests[i].ft_id = dest_action->dest_tbl.tbl->table_id;
+                               hw_dests[i].ft_id = dest_action->dest_tbl->tbl->table_id;
                        break;
 
                default:
@@ -657,8 +679,8 @@ mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
                                      hw_dests,
                                      num_of_dests,
                                      reformat_req,
-                                     &action->dest_tbl.fw_tbl.id,
-                                     &action->dest_tbl.fw_tbl.group_id);
+                                     &action->dest_tbl->fw_tbl.id,
+                                     &action->dest_tbl->fw_tbl.group_id);
        if (ret)
                goto free_action;
 
@@ -667,11 +689,11 @@ mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
        for (i = 0; i < num_of_ref; i++)
                refcount_inc(&ref_actions[i]->refcount);
 
-       action->dest_tbl.is_fw_tbl = true;
-       action->dest_tbl.fw_tbl.dmn = dmn;
-       action->dest_tbl.fw_tbl.type = FS_FT_FDB;
-       action->dest_tbl.fw_tbl.ref_actions = ref_actions;
-       action->dest_tbl.fw_tbl.num_of_ref_actions = num_of_ref;
+       action->dest_tbl->is_fw_tbl = true;
+       action->dest_tbl->fw_tbl.dmn = dmn;
+       action->dest_tbl->fw_tbl.type = FS_FT_FDB;
+       action->dest_tbl->fw_tbl.ref_actions = ref_actions;
+       action->dest_tbl->fw_tbl.num_of_ref_actions = num_of_ref;
 
        kfree(hw_dests);
 
@@ -696,10 +718,10 @@ mlx5dr_action_create_dest_flow_fw_table(struct mlx5dr_domain *dmn,
        if (!action)
                return NULL;
 
-       action->dest_tbl.is_fw_tbl = 1;
-       action->dest_tbl.fw_tbl.type = ft->type;
-       action->dest_tbl.fw_tbl.id = ft->id;
-       action->dest_tbl.fw_tbl.dmn = dmn;
+       action->dest_tbl->is_fw_tbl = 1;
+       action->dest_tbl->fw_tbl.type = ft->type;
+       action->dest_tbl->fw_tbl.id = ft->id;
+       action->dest_tbl->fw_tbl.dmn = dmn;
 
        refcount_inc(&dmn->refcount);
 
@@ -715,7 +737,7 @@ mlx5dr_action_create_flow_counter(u32 counter_id)
        if (!action)
                return NULL;
 
-       action->ctr.ctr_id = counter_id;
+       action->ctr->ctr_id = counter_id;
 
        return action;
 }
@@ -728,7 +750,7 @@ struct mlx5dr_action *mlx5dr_action_create_tag(u32 tag_value)
        if (!action)
                return NULL;
 
-       action->flow_tag = tag_value & 0xffffff;
+       action->flow_tag->flow_tag = tag_value & 0xffffff;
 
        return action;
 }
@@ -794,8 +816,8 @@ dr_action_create_reformat_action(struct mlx5dr_domain *dmn,
                if (ret)
                        return ret;
 
-               action->reformat.reformat_id = reformat_id;
-               action->reformat.reformat_size = data_sz;
+               action->reformat->reformat_id = reformat_id;
+               action->reformat->reformat_size = data_sz;
                return 0;
        }
        case DR_ACTION_TYP_TNL_L2_TO_L2:
@@ -811,28 +833,28 @@ dr_action_create_reformat_action(struct mlx5dr_domain *dmn,
                                                          data, data_sz,
                                                          hw_actions,
                                                          ACTION_CACHE_LINE_SIZE,
-                                                         &action->rewrite.num_of_actions);
+                                                         &action->rewrite->num_of_actions);
                if (ret) {
                        mlx5dr_dbg(dmn, "Failed creating decap l3 action list\n");
                        return ret;
                }
 
-               action->rewrite.chunk = mlx5dr_icm_alloc_chunk(dmn->action_icm_pool,
-                                                              DR_CHUNK_SIZE_8);
-               if (!action->rewrite.chunk) {
+               action->rewrite->chunk = mlx5dr_icm_alloc_chunk(dmn->action_icm_pool,
+                                                               DR_CHUNK_SIZE_8);
+               if (!action->rewrite->chunk) {
                        mlx5dr_dbg(dmn, "Failed allocating modify header chunk\n");
                        return -ENOMEM;
                }
 
-               action->rewrite.data = (void *)hw_actions;
-               action->rewrite.index = (action->rewrite.chunk->icm_addr -
+               action->rewrite->data = (void *)hw_actions;
+               action->rewrite->index = (action->rewrite->chunk->icm_addr -
                                         dmn->info.caps.hdr_modify_icm_addr) /
                                         ACTION_CACHE_LINE_SIZE;
 
                ret = mlx5dr_send_postsend_action(dmn, action);
                if (ret) {
                        mlx5dr_dbg(dmn, "Writing decap l3 actions to ICM failed\n");
-                       mlx5dr_icm_free_chunk(action->rewrite.chunk);
+                       mlx5dr_icm_free_chunk(action->rewrite->chunk);
                        return ret;
                }
                return 0;
@@ -867,7 +889,7 @@ struct mlx5dr_action *mlx5dr_action_create_push_vlan(struct mlx5dr_domain *dmn,
        if (!action)
                return NULL;
 
-       action->push_vlan.vlan_hdr = vlan_hdr_h;
+       action->push_vlan->vlan_hdr = vlan_hdr_h;
        return action;
 }
 
@@ -898,7 +920,7 @@ mlx5dr_action_create_packet_reformat(struct mlx5dr_domain *dmn,
        if (!action)
                goto dec_ref;
 
-       action->reformat.dmn = dmn;
+       action->reformat->dmn = dmn;
 
        ret = dr_action_create_reformat_action(dmn,
                                               data_sz,
@@ -1104,17 +1126,17 @@ dr_action_modify_check_set_field_limitation(struct mlx5dr_action *action,
                                            const __be64 *sw_action)
 {
        u16 sw_field = MLX5_GET(set_action_in, sw_action, field);
-       struct mlx5dr_domain *dmn = action->rewrite.dmn;
+       struct mlx5dr_domain *dmn = action->rewrite->dmn;
 
        if (sw_field == MLX5_ACTION_IN_FIELD_METADATA_REG_A) {
-               action->rewrite.allow_rx = 0;
+               action->rewrite->allow_rx = 0;
                if (dmn->type != MLX5DR_DOMAIN_TYPE_NIC_TX) {
                        mlx5dr_dbg(dmn, "Unsupported field %d for RX/FDB set action\n",
                                   sw_field);
                        return -EINVAL;
                }
        } else if (sw_field == MLX5_ACTION_IN_FIELD_METADATA_REG_B) {
-               action->rewrite.allow_tx = 0;
+               action->rewrite->allow_tx = 0;
                if (dmn->type != MLX5DR_DOMAIN_TYPE_NIC_RX) {
                        mlx5dr_dbg(dmn, "Unsupported field %d for TX/FDB set action\n",
                                   sw_field);
@@ -1122,7 +1144,7 @@ dr_action_modify_check_set_field_limitation(struct mlx5dr_action *action,
                }
        }
 
-       if (!action->rewrite.allow_rx && !action->rewrite.allow_tx) {
+       if (!action->rewrite->allow_rx && !action->rewrite->allow_tx) {
                mlx5dr_dbg(dmn, "Modify SET actions not supported on both RX and TX\n");
                return -EINVAL;
        }
@@ -1135,7 +1157,7 @@ dr_action_modify_check_add_field_limitation(struct mlx5dr_action *action,
                                            const __be64 *sw_action)
 {
        u16 sw_field = MLX5_GET(set_action_in, sw_action, field);
-       struct mlx5dr_domain *dmn = action->rewrite.dmn;
+       struct mlx5dr_domain *dmn = action->rewrite->dmn;
 
        if (sw_field != MLX5_ACTION_IN_FIELD_OUT_IP_TTL &&
            sw_field != MLX5_ACTION_IN_FIELD_OUT_IPV6_HOPLIMIT &&
@@ -1153,7 +1175,7 @@ static int
 dr_action_modify_check_copy_field_limitation(struct mlx5dr_action *action,
                                             const __be64 *sw_action)
 {
-       struct mlx5dr_domain *dmn = action->rewrite.dmn;
+       struct mlx5dr_domain *dmn = action->rewrite->dmn;
        u16 sw_fields[2];
        int i;
 
@@ -1162,14 +1184,14 @@ dr_action_modify_check_copy_field_limitation(struct mlx5dr_action *action,
 
        for (i = 0; i < 2; i++) {
                if (sw_fields[i] == MLX5_ACTION_IN_FIELD_METADATA_REG_A) {
-                       action->rewrite.allow_rx = 0;
+                       action->rewrite->allow_rx = 0;
                        if (dmn->type != MLX5DR_DOMAIN_TYPE_NIC_TX) {
                                mlx5dr_dbg(dmn, "Unsupported field %d for RX/FDB set action\n",
                                           sw_fields[i]);
                                return -EINVAL;
                        }
                } else if (sw_fields[i] == MLX5_ACTION_IN_FIELD_METADATA_REG_B) {
-                       action->rewrite.allow_tx = 0;
+                       action->rewrite->allow_tx = 0;
                        if (dmn->type != MLX5DR_DOMAIN_TYPE_NIC_RX) {
                                mlx5dr_dbg(dmn, "Unsupported field %d for TX/FDB set action\n",
                                           sw_fields[i]);
@@ -1178,7 +1200,7 @@ dr_action_modify_check_copy_field_limitation(struct mlx5dr_action *action,
                }
        }
 
-       if (!action->rewrite.allow_rx && !action->rewrite.allow_tx) {
+       if (!action->rewrite->allow_rx && !action->rewrite->allow_tx) {
                mlx5dr_dbg(dmn, "Modify copy actions not supported on both RX and TX\n");
                return -EINVAL;
        }
@@ -1190,7 +1212,7 @@ static int
 dr_action_modify_check_field_limitation(struct mlx5dr_action *action,
                                        const __be64 *sw_action)
 {
-       struct mlx5dr_domain *dmn = action->rewrite.dmn;
+       struct mlx5dr_domain *dmn = action->rewrite->dmn;
        u8 action_type;
        int ret;
 
@@ -1239,7 +1261,7 @@ static int dr_actions_convert_modify_header(struct mlx5dr_action *action,
 {
        const struct mlx5dr_ste_action_modify_field *hw_dst_action_info;
        const struct mlx5dr_ste_action_modify_field *hw_src_action_info;
-       struct mlx5dr_domain *dmn = action->rewrite.dmn;
+       struct mlx5dr_domain *dmn = action->rewrite->dmn;
        int ret, i, hw_idx = 0;
        __be64 *sw_action;
        __be64 hw_action;
@@ -1249,8 +1271,8 @@ static int dr_actions_convert_modify_header(struct mlx5dr_action *action,
 
        *modify_ttl = false;
 
-       action->rewrite.allow_rx = 1;
-       action->rewrite.allow_tx = 1;
+       action->rewrite->allow_rx = 1;
+       action->rewrite->allow_tx = 1;
 
        for (i = 0; i < num_sw_actions; i++) {
                sw_action = &sw_actions[i];
@@ -1358,13 +1380,13 @@ static int dr_action_create_modify_action(struct mlx5dr_domain *dmn,
        if (ret)
                goto free_hw_actions;
 
-       action->rewrite.chunk = chunk;
-       action->rewrite.modify_ttl = modify_ttl;
-       action->rewrite.data = (u8 *)hw_actions;
-       action->rewrite.num_of_actions = num_hw_actions;
-       action->rewrite.index = (chunk->icm_addr -
-                                dmn->info.caps.hdr_modify_icm_addr) /
-                                ACTION_CACHE_LINE_SIZE;
+       action->rewrite->chunk = chunk;
+       action->rewrite->modify_ttl = modify_ttl;
+       action->rewrite->data = (u8 *)hw_actions;
+       action->rewrite->num_of_actions = num_hw_actions;
+       action->rewrite->index = (chunk->icm_addr -
+                                 dmn->info.caps.hdr_modify_icm_addr) /
+                                 ACTION_CACHE_LINE_SIZE;
 
        ret = mlx5dr_send_postsend_action(dmn, action);
        if (ret)
@@ -1399,7 +1421,7 @@ mlx5dr_action_create_modify_header(struct mlx5dr_domain *dmn,
        if (!action)
                goto dec_ref;
 
-       action->rewrite.dmn = dmn;
+       action->rewrite->dmn = dmn;
 
        ret = dr_action_create_modify_action(dmn,
                                             actions_sz,
@@ -1451,8 +1473,8 @@ mlx5dr_action_create_dest_vport(struct mlx5dr_domain *dmn,
        if (!action)
                return NULL;
 
-       action->vport.dmn = vport_dmn;
-       action->vport.caps = vport_cap;
+       action->vport->dmn = vport_dmn;
+       action->vport->caps = vport_cap;
 
        return action;
 }
@@ -1464,44 +1486,44 @@ int mlx5dr_action_destroy(struct mlx5dr_action *action)
 
        switch (action->action_type) {
        case DR_ACTION_TYP_FT:
-               if (action->dest_tbl.is_fw_tbl)
-                       refcount_dec(&action->dest_tbl.fw_tbl.dmn->refcount);
+               if (action->dest_tbl->is_fw_tbl)
+                       refcount_dec(&action->dest_tbl->fw_tbl.dmn->refcount);
                else
-                       refcount_dec(&action->dest_tbl.tbl->refcount);
+                       refcount_dec(&action->dest_tbl->tbl->refcount);
 
-               if (action->dest_tbl.is_fw_tbl &&
-                   action->dest_tbl.fw_tbl.num_of_ref_actions) {
+               if (action->dest_tbl->is_fw_tbl &&
+                   action->dest_tbl->fw_tbl.num_of_ref_actions) {
                        struct mlx5dr_action **ref_actions;
                        int i;
 
-                       ref_actions = action->dest_tbl.fw_tbl.ref_actions;
-                       for (i = 0; i < action->dest_tbl.fw_tbl.num_of_ref_actions; i++)
+                       ref_actions = action->dest_tbl->fw_tbl.ref_actions;
+                       for (i = 0; i < action->dest_tbl->fw_tbl.num_of_ref_actions; i++)
                                refcount_dec(&ref_actions[i]->refcount);
 
                        kfree(ref_actions);
 
-                       mlx5dr_fw_destroy_md_tbl(action->dest_tbl.fw_tbl.dmn,
-                                                action->dest_tbl.fw_tbl.id,
-                                                action->dest_tbl.fw_tbl.group_id);
+                       mlx5dr_fw_destroy_md_tbl(action->dest_tbl->fw_tbl.dmn,
+                                                action->dest_tbl->fw_tbl.id,
+                                                action->dest_tbl->fw_tbl.group_id);
                }
                break;
        case DR_ACTION_TYP_TNL_L2_TO_L2:
-               refcount_dec(&action->reformat.dmn->refcount);
+               refcount_dec(&action->reformat->dmn->refcount);
                break;
        case DR_ACTION_TYP_TNL_L3_TO_L2:
-               mlx5dr_icm_free_chunk(action->rewrite.chunk);
-               refcount_dec(&action->reformat.dmn->refcount);
+               mlx5dr_icm_free_chunk(action->rewrite->chunk);
+               refcount_dec(&action->rewrite->dmn->refcount);
                break;
        case DR_ACTION_TYP_L2_TO_TNL_L2:
        case DR_ACTION_TYP_L2_TO_TNL_L3:
-               mlx5dr_cmd_destroy_reformat_ctx((action->reformat.dmn)->mdev,
-                                               action->reformat.reformat_id);
-               refcount_dec(&action->reformat.dmn->refcount);
+               mlx5dr_cmd_destroy_reformat_ctx((action->reformat->dmn)->mdev,
+                                               action->reformat->reformat_id);
+               refcount_dec(&action->reformat->dmn->refcount);
                break;
        case DR_ACTION_TYP_MODIFY_HDR:
-               mlx5dr_icm_free_chunk(action->rewrite.chunk);
-               kfree(action->rewrite.data);
-               refcount_dec(&action->rewrite.dmn->refcount);
+               mlx5dr_icm_free_chunk(action->rewrite->chunk);
+               kfree(action->rewrite->data);
+               refcount_dec(&action->rewrite->dmn->refcount);
                break;
        default:
                break;
index 30b0136..461473d 100644 (file)
@@ -287,7 +287,7 @@ int mlx5dr_cmd_create_empty_flow_group(struct mlx5_core_dev *mdev,
        u32 *in;
        int err;
 
-       in = kzalloc(inlen, GFP_KERNEL);
+       in = kvzalloc(inlen, GFP_KERNEL);
        if (!in)
                return -ENOMEM;
 
@@ -302,7 +302,7 @@ int mlx5dr_cmd_create_empty_flow_group(struct mlx5_core_dev *mdev,
        *group_id = MLX5_GET(create_flow_group_out, out, group_id);
 
 out:
-       kfree(in);
+       kvfree(in);
        return err;
 }
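
The kzalloc-to-kvzalloc switch above (and in dr_send.c below) covers buffers that can grow well past a page and never need to be physically contiguous. kvzalloc() tries kmalloc first and falls back to vmalloc; the only contract change is that frees must go through kvfree(). A minimal sketch of the pattern:

    /* Sketch of the allocation pattern adopted above; kvzalloc() and
     * kvfree() come from <linux/mm.h>.
     */
    static int demo_run_cmd(size_t inlen)
    {
            u32 *in;

            in = kvzalloc(inlen, GFP_KERNEL);   /* may fall back to vmalloc */
            if (!in)
                    return -ENOMEM;

            /* ... build and execute the command ... */

            kvfree(in);                         /* never plain kfree() here */
            return 0;
    }
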
 
index 8a6a56f..c1926d9 100644 (file)
@@ -406,7 +406,7 @@ static int dr_get_tbl_copy_details(struct mlx5dr_domain *dmn,
                alloc_size = *num_stes * DR_STE_SIZE;
        }
 
-       *data = kzalloc(alloc_size, GFP_KERNEL);
+       *data = kvzalloc(alloc_size, GFP_KERNEL);
        if (!*data)
                return -ENOMEM;
 
@@ -505,7 +505,7 @@ int mlx5dr_send_postsend_htbl(struct mlx5dr_domain *dmn,
        }
 
 out_free:
-       kfree(data);
+       kvfree(data);
        return ret;
 }
 
@@ -562,7 +562,7 @@ int mlx5dr_send_postsend_formatted_htbl(struct mlx5dr_domain *dmn,
        }
 
 out_free:
-       kfree(data);
+       kvfree(data);
        return ret;
 }
 
@@ -572,12 +572,12 @@ int mlx5dr_send_postsend_action(struct mlx5dr_domain *dmn,
        struct postsend_info send_info = {};
        int ret;
 
-       send_info.write.addr = (uintptr_t)action->rewrite.data;
-       send_info.write.length = action->rewrite.num_of_actions *
+       send_info.write.addr = (uintptr_t)action->rewrite->data;
+       send_info.write.length = action->rewrite->num_of_actions *
                                 DR_MODIFY_ACTION_SIZE;
        send_info.write.lkey = 0;
-       send_info.remote_addr = action->rewrite.chunk->mr_addr;
-       send_info.rkey = action->rewrite.chunk->rkey;
+       send_info.remote_addr = action->rewrite->chunk->mr_addr;
+       send_info.rkey = action->rewrite->chunk->rkey;
 
        ret = dr_postsend_icm_data(dmn, &send_info);
 
index b599b6b..30ae3cd 100644 (file)
@@ -29,7 +29,7 @@ int mlx5dr_table_set_miss_action(struct mlx5dr_table *tbl,
                        last_htbl = tbl->rx.s_anchor;
 
                tbl->rx.default_icm_addr = action ?
-                       action->dest_tbl.tbl->rx.s_anchor->chunk->icm_addr :
+                       action->dest_tbl->tbl->rx.s_anchor->chunk->icm_addr :
                        tbl->rx.nic_dmn->default_icm_addr;
 
                info.type = CONNECT_MISS;
@@ -53,7 +53,7 @@ int mlx5dr_table_set_miss_action(struct mlx5dr_table *tbl,
                        last_htbl = tbl->tx.s_anchor;
 
                tbl->tx.default_icm_addr = action ?
-                       action->dest_tbl.tbl->tx.s_anchor->chunk->icm_addr :
+                       action->dest_tbl->tbl->tx.s_anchor->chunk->icm_addr :
                        tbl->tx.nic_dmn->default_icm_addr;
 
                info.type = CONNECT_MISS;
index 4af0e4e..4626739 100644 (file)
@@ -806,53 +806,71 @@ struct mlx5dr_ste_action_modify_field {
        u8 l4_type;
 };
 
+struct mlx5dr_action_rewrite {
+       struct mlx5dr_domain *dmn;
+       struct mlx5dr_icm_chunk *chunk;
+       u8 *data;
+       u16 num_of_actions;
+       u32 index;
+       u8 allow_rx:1;
+       u8 allow_tx:1;
+       u8 modify_ttl:1;
+};
+
+struct mlx5dr_action_reformat {
+       struct mlx5dr_domain *dmn;
+       u32 reformat_id;
+       u32 reformat_size;
+};
+
+struct mlx5dr_action_dest_tbl {
+       u8 is_fw_tbl:1;
+       union {
+               struct mlx5dr_table *tbl;
+               struct {
+                       struct mlx5dr_domain *dmn;
+                       u32 id;
+                       u32 group_id;
+                       enum fs_flow_table_type type;
+                       u64 rx_icm_addr;
+                       u64 tx_icm_addr;
+                       struct mlx5dr_action **ref_actions;
+                       u32 num_of_ref_actions;
+               } fw_tbl;
+       };
+};
+
+struct mlx5dr_action_ctr {
+       u32 ctr_id;
+       u32 offeset;
+};
+
+struct mlx5dr_action_vport {
+       struct mlx5dr_domain *dmn;
+       struct mlx5dr_cmd_vport_cap *caps;
+};
+
+struct mlx5dr_action_push_vlan {
+       u32 vlan_hdr; /* tpid_pcp_dei_vid */
+};
+
+struct mlx5dr_action_flow_tag {
+       u32 flow_tag;
+};
+
 struct mlx5dr_action {
        enum mlx5dr_action_type action_type;
        refcount_t refcount;
+
        union {
-               struct {
-                       struct mlx5dr_domain *dmn;
-                       struct mlx5dr_icm_chunk *chunk;
-                       u8 *data;
-                       u16 num_of_actions;
-                       u32 index;
-                       u8 allow_rx:1;
-                       u8 allow_tx:1;
-                       u8 modify_ttl:1;
-               } rewrite;
-               struct {
-                       struct mlx5dr_domain *dmn;
-                       u32 reformat_id;
-                       u32 reformat_size;
-               } reformat;
-               struct {
-                       u8 is_fw_tbl:1;
-                       union {
-                               struct mlx5dr_table *tbl;
-                               struct {
-                                       struct mlx5dr_domain *dmn;
-                                       u32 id;
-                                       u32 group_id;
-                                       enum fs_flow_table_type type;
-                                       u64 rx_icm_addr;
-                                       u64 tx_icm_addr;
-                                       struct mlx5dr_action **ref_actions;
-                                       u32 num_of_ref_actions;
-                               } fw_tbl;
-                       };
-               } dest_tbl;
-               struct {
-                       u32 ctr_id;
-                       u32 offeset;
-               } ctr;
-               struct {
-                       struct mlx5dr_domain *dmn;
-                       struct mlx5dr_cmd_vport_cap *caps;
-               } vport;
-               struct {
-                       u32 vlan_hdr; /* tpid_pcp_dei_vid */
-               } push_vlan;
-               u32 flow_tag;
+               void *data;
+               struct mlx5dr_action_rewrite *rewrite;
+               struct mlx5dr_action_reformat *reformat;
+               struct mlx5dr_action_dest_tbl *dest_tbl;
+               struct mlx5dr_action_ctr *ctr;
+               struct mlx5dr_action_vport *vport;
+               struct mlx5dr_action_push_vlan *push_vlan;
+               struct mlx5dr_action_flow_tag *flow_tag;
        };
 };
 
index 85031b4..9856773 100644 (file)
@@ -1910,6 +1910,32 @@ static void rtl8169_get_ringparam(struct net_device *dev,
        data->tx_pending = NUM_TX_DESC;
 }
 
+static void rtl8169_get_pauseparam(struct net_device *dev,
+                                  struct ethtool_pauseparam *data)
+{
+       struct rtl8169_private *tp = netdev_priv(dev);
+       bool tx_pause, rx_pause;
+
+       phy_get_pause(tp->phydev, &tx_pause, &rx_pause);
+
+       data->autoneg = tp->phydev->autoneg;
+       data->tx_pause = tx_pause ? 1 : 0;
+       data->rx_pause = rx_pause ? 1 : 0;
+}
+
+static int rtl8169_set_pauseparam(struct net_device *dev,
+                                 struct ethtool_pauseparam *data)
+{
+       struct rtl8169_private *tp = netdev_priv(dev);
+
+       if (dev->mtu > ETH_DATA_LEN)
+               return -EOPNOTSUPP;
+
+       phy_set_asym_pause(tp->phydev, data->rx_pause, data->tx_pause);
+
+       return 0;
+}
+
 static const struct ethtool_ops rtl8169_ethtool_ops = {
        .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
                                     ETHTOOL_COALESCE_MAX_FRAMES,
@@ -1931,6 +1957,8 @@ static const struct ethtool_ops rtl8169_ethtool_ops = {
        .get_link_ksettings     = phy_ethtool_get_link_ksettings,
        .set_link_ksettings     = phy_ethtool_set_link_ksettings,
        .get_ringparam          = rtl8169_get_ringparam,
+       .get_pauseparam         = rtl8169_get_pauseparam,
+       .set_pauseparam         = rtl8169_set_pauseparam,
 };
 
 static void rtl_enable_eee(struct rtl8169_private *tp)
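
The new get/set pauseparam hooks above expose flow control through the standard ethtool interface, delegating entirely to phylib (phy_get_pause()/phy_set_asym_pause()) and refusing configuration while a jumbo MTU is in use. A userspace sketch for querying them via the classic ioctl path; "eth0" is an assumed interface name and error handling is abbreviated:

    /* Userspace sketch, roughly equivalent to `ethtool -a eth0`. */
    #include <linux/ethtool.h>
    #include <linux/sockios.h>
    #include <net/if.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <unistd.h>

    int main(void)
    {
            struct ethtool_pauseparam pp = { .cmd = ETHTOOL_GPAUSEPARAM };
            struct ifreq ifr;
            int fd = socket(AF_INET, SOCK_DGRAM, 0);

            if (fd < 0)
                    return 1;

            memset(&ifr, 0, sizeof(ifr));
            strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
            ifr.ifr_data = (void *)&pp;

            if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
                    printf("autoneg %u, rx %u, tx %u\n",
                           pp.autoneg, pp.rx_pause, pp.tx_pause);

            close(fd);
            return 0;
    }
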
index 3332cdf..cd590e0 100644 (file)
@@ -78,7 +78,6 @@ enum efx_loopback_mode {
                            (1 << LOOPBACK_XAUI) |              \
                            (1 << LOOPBACK_GMII) |              \
                            (1 << LOOPBACK_SGMII) |             \
-                           (1 << LOOPBACK_SGMII) |             \
                            (1 << LOOPBACK_XGBR) |              \
                            (1 << LOOPBACK_XFI) |               \
                            (1 << LOOPBACK_XAUI_FAR) |          \
index 6056659..ec140fc 100644 (file)
@@ -296,6 +296,13 @@ static int intel_crosststamp(ktime_t *device,
 
        intel_priv = priv->plat->bsp_priv;
 
+       /* Internal crosstimestamping and externally triggered event
+        * timestamping cannot run concurrently.
+        */
+       if (priv->plat->ext_snapshot_en)
+               return -EBUSY;
+
+       mutex_lock(&priv->aux_ts_lock);
        /* Enable Internal snapshot trigger */
        acr_value = readl(ptpaddr + PTP_ACR);
        acr_value &= ~PTP_ACR_MASK;
@@ -321,6 +328,8 @@ static int intel_crosststamp(ktime_t *device,
        acr_value = readl(ptpaddr + PTP_ACR);
        acr_value |= PTP_ACR_ATSFC;
        writel(acr_value, ptpaddr + PTP_ACR);
+       /* Snapshot trigger programmed; drop the lock */
+       mutex_unlock(&priv->aux_ts_lock);
 
+       /* Trigger the internal snapshot signal:
+        * create a rising edge by toggling GPO1 to low
@@ -520,6 +529,7 @@ static int intel_mgbe_common_data(struct pci_dev *pdev,
        plat->mdio_bus_data->phy_mask |= 1 << INTEL_MGBE_XPCS_ADDR;
 
        plat->int_snapshot_num = AUX_SNAPSHOT1;
+       plat->ext_snapshot_num = AUX_SNAPSHOT0;
 
        plat->has_crossts = true;
        plat->crosststamp = intel_crosststamp;
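
intel_crosststamp() implements the getcrosststamp PTP hook, which userspace reaches via the PTP_SYS_OFFSET_PRECISE ioctl; with the change above it now fails with -EBUSY while auxiliary snapshots are armed, since both features share the same snapshot machinery. A userspace sketch, assuming a /dev/ptp0 node:

    /* Request one precise device/system cross-timestamp. After this
     * patch the ioctl fails with EBUSY while external timestamping is
     * enabled.
     */
    #include <fcntl.h>
    #include <linux/ptp_clock.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>

    int main(void)
    {
            struct ptp_sys_offset_precise off;
            int fd = open("/dev/ptp0", O_RDWR);

            if (fd < 0)
                    return 1;

            memset(&off, 0, sizeof(off));
            if (ioctl(fd, PTP_SYS_OFFSET_PRECISE, &off) == 0)
                    printf("device %lld.%09u realtime %lld.%09u\n",
                           (long long)off.device.sec, off.device.nsec,
                           (long long)off.sys_realtime.sec,
                           off.sys_realtime.nsec);

            close(fd);
            return 0;
    }
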
index 2b5022e..2cc9175 100644 (file)
@@ -504,6 +504,8 @@ struct stmmac_ops {
 #define stmmac_fpe_irq_status(__priv, __args...) \
        stmmac_do_callback(__priv, mac, fpe_irq_status, __args)
 
+struct stmmac_priv;
+
 /* PTP and HW Timer helpers */
 struct stmmac_hwtimestamp {
        void (*config_hw_tstamping) (void __iomem *ioaddr, u32 data);
@@ -515,6 +517,7 @@ struct stmmac_hwtimestamp {
                               int add_sub, int gmac4);
        void (*get_systime) (void __iomem *ioaddr, u64 *systime);
        void (*get_ptptime)(void __iomem *ioaddr, u64 *ptp_time);
+       void (*timestamp_interrupt)(struct stmmac_priv *priv);
 };
 
 #define stmmac_config_hw_tstamping(__priv, __args...) \
@@ -531,6 +534,8 @@ struct stmmac_hwtimestamp {
        stmmac_do_void_callback(__priv, ptp, get_systime, __args)
 #define stmmac_get_ptptime(__priv, __args...) \
        stmmac_do_void_callback(__priv, ptp, get_ptptime, __args)
+#define stmmac_timestamp_interrupt(__priv, __args...) \
+       stmmac_do_void_callback(__priv, ptp, timestamp_interrupt, __args)
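
The new hook is invoked through stmmac's usual callback-dispatch macros, which no-op safely when an ops table lacks the entry. Roughly what the call expands to for this hook; this is an illustrative expansion, not the verbatim macro, and it assumes the hwtimestamp ops are reachable as priv->hw->ptp as for the other ptp callbacks here:

    /* Illustrative expansion of stmmac_timestamp_interrupt(priv, priv);
     * the real stmmac_do_void_callback() checks both the ops table and
     * the named hook before calling.
     */
    static inline void demo_timestamp_interrupt(struct stmmac_priv *priv)
    {
            if (priv->hw->ptp && priv->hw->ptp->timestamp_interrupt)
                    priv->hw->ptp->timestamp_interrupt(priv);
    }
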
 
 /* Helpers to manage the descriptors for chain and ring modes */
 struct stmmac_mode_ops {
index b8a4226..b6cd43e 100644 (file)
@@ -250,6 +250,9 @@ struct stmmac_priv {
        int use_riwt;
        int irq_wake;
        spinlock_t ptp_lock;
+       /* Protects auxiliary snapshot registers from concurrent access. */
+       struct mutex aux_ts_lock;
+
        void __iomem *mmcaddr;
        void __iomem *ptpaddr;
        unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
index 113c51b..074e2cd 100644 (file)
 #include <linux/io.h>
 #include <linux/iopoll.h>
 #include <linux/delay.h>
+#include <linux/ptp_clock_kernel.h>
 #include "common.h"
 #include "stmmac_ptp.h"
+#include "dwmac4.h"
+#include "stmmac.h"
 
 static void config_hw_tstamping(void __iomem *ioaddr, u32 data)
 {
@@ -163,6 +166,41 @@ static void get_ptptime(void __iomem *ptpaddr, u64 *ptp_time)
        *ptp_time = ns;
 }
 
+static void timestamp_interrupt(struct stmmac_priv *priv)
+{
+       u32 num_snapshot, ts_status, tsync_int;
+       struct ptp_clock_event event;
+       unsigned long flags;
+       u64 ptp_time;
+       int i;
+
+       tsync_int = readl(priv->ioaddr + GMAC_INT_STATUS) & GMAC_INT_TSIE;
+
+       if (!tsync_int)
+               return;
+
+       /* Reading the timestamp status register clears the interrupt,
+        * whether it was raised by an external timestamp or by a PPS
+        * start/end event.
+        */
+       ts_status = readl(priv->ioaddr + GMAC_TIMESTAMP_STATUS);
+
+       if (!priv->plat->ext_snapshot_en)
+               return;
+
+       num_snapshot = (ts_status & GMAC_TIMESTAMP_ATSNS_MASK) >>
+                      GMAC_TIMESTAMP_ATSNS_SHIFT;
+
+       for (i = 0; i < num_snapshot; i++) {
+               spin_lock_irqsave(&priv->ptp_lock, flags);
+               get_ptptime(priv->ptpaddr, &ptp_time);
+               spin_unlock_irqrestore(&priv->ptp_lock, flags);
+               event.type = PTP_CLOCK_EXTTS;
+               event.index = 0;
+               event.timestamp = ptp_time;
+               ptp_clock_event(priv->ptp_clock, &event);
+       }
+}
+
 const struct stmmac_hwtimestamp stmmac_ptp = {
        .config_hw_tstamping = config_hw_tstamping,
        .init_systime = init_systime,
@@ -171,4 +209,5 @@ const struct stmmac_hwtimestamp stmmac_ptp = {
        .adjust_systime = adjust_systime,
        .get_systime = get_systime,
        .get_ptptime = get_ptptime,
+       .timestamp_interrupt = timestamp_interrupt,
 };
index e3e2220..3a5ca58 100644 (file)
@@ -5687,6 +5687,8 @@ static void stmmac_common_interrupt(struct stmmac_priv *priv)
                        else
                                netif_carrier_off(priv->dev);
                }
+
+               stmmac_timestamp_interrupt(priv, priv);
        }
 }
 
index b164ae2..4e86cdf 100644 (file)
@@ -135,7 +135,10 @@ static int stmmac_enable(struct ptp_clock_info *ptp,
 {
        struct stmmac_priv *priv =
            container_of(ptp, struct stmmac_priv, ptp_clock_ops);
+       void __iomem *ptpaddr = priv->ptpaddr;
+       void __iomem *ioaddr = priv->hw->pcsr;
        struct stmmac_pps_cfg *cfg;
+       u32 intr_value, acr_value;
        int ret = -EOPNOTSUPP;
        unsigned long flags;
 
@@ -159,6 +162,37 @@ static int stmmac_enable(struct ptp_clock_info *ptp,
                                             priv->systime_flags);
                spin_unlock_irqrestore(&priv->ptp_lock, flags);
                break;
+       case PTP_CLK_REQ_EXTTS:
+               priv->plat->ext_snapshot_en = on;
+               mutex_lock(&priv->aux_ts_lock);
+               acr_value = readl(ptpaddr + PTP_ACR);
+               acr_value &= ~PTP_ACR_MASK;
+               if (on) {
+                       /* Enable External snapshot trigger */
+                       acr_value |= priv->plat->ext_snapshot_num;
+                       acr_value |= PTP_ACR_ATSFC;
+                       netdev_dbg(priv->dev, "Auxiliary Snapshot %d enabled.\n",
+                                  priv->plat->ext_snapshot_num >>
+                                  PTP_ACR_ATSEN_SHIFT);
+                       /* Enable Timestamp Interrupt */
+                       intr_value = readl(ioaddr + GMAC_INT_EN);
+                       intr_value |= GMAC_INT_TSIE;
+                       writel(intr_value, ioaddr + GMAC_INT_EN);
+
+               } else {
+                       netdev_dbg(priv->dev, "Auxiliary Snapshot %d disabled.\n",
+                                  priv->plat->ext_snapshot_num >>
+                                  PTP_ACR_ATSEN_SHIFT);
+                       /* Disable Timestamp Interrupt */
+                       intr_value = readl(ioaddr + GMAC_INT_EN);
+                       intr_value &= ~GMAC_INT_TSIE;
+                       writel(intr_value, ioaddr + GMAC_INT_EN);
+               }
+               writel(acr_value, ptpaddr + PTP_ACR);
+               mutex_unlock(&priv->aux_ts_lock);
+               ret = 0;
+               break;
+
        default:
                break;
        }
@@ -202,7 +236,7 @@ static struct ptp_clock_info stmmac_ptp_clock_ops = {
        .name = "stmmac ptp",
        .max_adj = 62500000,
        .n_alarm = 0,
-       .n_ext_ts = 0,
+       .n_ext_ts = 0, /* will be overwritten in stmmac_ptp_register */
        .n_per_out = 0, /* will be overwritten in stmmac_ptp_register */
        .n_pins = 0,
        .pps = 0,
@@ -237,8 +271,10 @@ void stmmac_ptp_register(struct stmmac_priv *priv)
                stmmac_ptp_clock_ops.max_adj = priv->plat->ptp_max_adj;
 
        stmmac_ptp_clock_ops.n_per_out = priv->dma_cap.pps_out_num;
+       stmmac_ptp_clock_ops.n_ext_ts = priv->dma_cap.aux_snapshot_n;
 
        spin_lock_init(&priv->ptp_lock);
+       mutex_init(&priv->aux_ts_lock);
        priv->ptp_clock_ops = stmmac_ptp_clock_ops;
 
        priv->ptp_clock = ptp_clock_register(&priv->ptp_clock_ops,
@@ -264,4 +300,6 @@ void stmmac_ptp_unregister(struct stmmac_priv *priv)
                pr_debug("Removed PTP HW clock successfully on %s\n",
                         priv->dev->name);
        }
+
+       mutex_destroy(&priv->aux_ts_lock);
 }
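
With n_ext_ts now advertised from dma_cap.aux_snapshot_n, userspace can arm an auxiliary-snapshot channel through the standard PTP character device and read events produced by timestamp_interrupt() above. A sketch in the spirit of the kernel's testptp tool, assuming a /dev/ptp0 node and abbreviated error handling:

    /* Enable EXTTS channel 0 and read one event. */
    #include <fcntl.h>
    #include <linux/ptp_clock.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>

    int main(void)
    {
            struct ptp_extts_request req;
            struct ptp_extts_event ev;
            int fd = open("/dev/ptp0", O_RDWR);

            if (fd < 0)
                    return 1;

            memset(&req, 0, sizeof(req));
            req.index = 0;                  /* matches event.index above */
            req.flags = PTP_ENABLE_FEATURE;
            if (ioctl(fd, PTP_EXTTS_REQUEST, &req))
                    return 1;

            if (read(fd, &ev, sizeof(ev)) == sizeof(ev))
                    printf("extts %u: %lld.%09u\n", ev.index,
                           (long long)ev.t.sec, ev.t.nsec);

            close(fd);
            return 0;
    }
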
index f88727c..53172a4 100644 (file)
@@ -73,6 +73,7 @@
 #define        PTP_ACR_ATSEN1          BIT(5)  /* Auxiliary Snapshot 1 Enable */
 #define        PTP_ACR_ATSEN2          BIT(6)  /* Auxiliary Snapshot 2 Enable */
 #define        PTP_ACR_ATSEN3          BIT(7)  /* Auxiliary Snapshot 3 Enable */
+#define        PTP_ACR_ATSEN_SHIFT     5       /* Auxiliary Snapshot shift */
 #define        PTP_ACR_MASK            GENMASK(7, 4)   /* Aux Snapshot Mask */
 #define        PMC_ART_VALUE0          0x01    /* PMC_ART[15:0] timer value */
 #define        PMC_ART_VALUE1          0x02    /* PMC_ART[31:16] timer value */
index eca8c2f..9b9ac3e 100644 (file)
 #define        MV_HOST_RST_SW  BIT(7)
 #define        MV_PORT_RST_SW  (MV_LINE_RST_SW | MV_HOST_RST_SW)
 
+/* PMD Receive Signal Detect */
+#define        MV_RX_SIGNAL_DETECT             0x000A
+#define        MV_RX_SIGNAL_DETECT_GLOBAL      BIT(0)
+
 /* 1000Base-X/SGMII Control Register */
 #define        MV_1GBX_CTRL            (0x2000 + MII_BMCR)
 
 #define        MV_1GBX_PHY_STAT_SPEED100       BIT(14)
 #define        MV_1GBX_PHY_STAT_SPEED1000      BIT(15)
 
+#define        AUTONEG_TIMEOUT 3
+
 struct mv2222_data {
        phy_interface_t line_interface;
        __ETHTOOL_DECLARE_LINK_MODE_MASK(supported);
+       bool sfp_link;
 };
 
 /* SFI PMA transmit enable */
@@ -81,86 +88,6 @@ static int mv2222_soft_reset(struct phy_device *phydev)
                                         5000, 1000000, true);
 }
 
-/* Returns negative on error, 0 if link is down, 1 if link is up */
-static int mv2222_read_status_10g(struct phy_device *phydev)
-{
-       int val, link = 0;
-
-       val = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_STAT1);
-       if (val < 0)
-               return val;
-
-       if (val & MDIO_STAT1_LSTATUS) {
-               link = 1;
-
-               /* 10GBASE-R do not support auto-negotiation */
-               phydev->autoneg = AUTONEG_DISABLE;
-               phydev->speed = SPEED_10000;
-               phydev->duplex = DUPLEX_FULL;
-       }
-
-       return link;
-}
-
-/* Returns negative on error, 0 if link is down, 1 if link is up */
-static int mv2222_read_status_1g(struct phy_device *phydev)
-{
-       int val, link = 0;
-
-       val = phy_read_mmd(phydev, MDIO_MMD_PCS, MV_1GBX_STAT);
-       if (val < 0)
-               return val;
-
-       if (!(val & BMSR_LSTATUS) ||
-           (phydev->autoneg == AUTONEG_ENABLE &&
-            !(val & BMSR_ANEGCOMPLETE)))
-               return 0;
-
-       link = 1;
-
-       val = phy_read_mmd(phydev, MDIO_MMD_PCS, MV_1GBX_PHY_STAT);
-       if (val < 0)
-               return val;
-
-       if (val & MV_1GBX_PHY_STAT_AN_RESOLVED) {
-               if (val & MV_1GBX_PHY_STAT_DUPLEX)
-                       phydev->duplex = DUPLEX_FULL;
-               else
-                       phydev->duplex = DUPLEX_HALF;
-
-               if (val & MV_1GBX_PHY_STAT_SPEED1000)
-                       phydev->speed = SPEED_1000;
-               else if (val & MV_1GBX_PHY_STAT_SPEED100)
-                       phydev->speed = SPEED_100;
-               else
-                       phydev->speed = SPEED_10;
-       }
-
-       return link;
-}
-
-static int mv2222_read_status(struct phy_device *phydev)
-{
-       struct mv2222_data *priv = phydev->priv;
-       int link;
-
-       phydev->link = 0;
-       phydev->speed = SPEED_UNKNOWN;
-       phydev->duplex = DUPLEX_UNKNOWN;
-
-       if (priv->line_interface == PHY_INTERFACE_MODE_10GBASER)
-               link = mv2222_read_status_10g(phydev);
-       else
-               link = mv2222_read_status_1g(phydev);
-
-       if (link < 0)
-               return link;
-
-       phydev->link = link;
-
-       return 0;
-}
-
 static int mv2222_disable_aneg(struct phy_device *phydev)
 {
        int ret = phy_clear_bits_mmd(phydev, MDIO_MMD_PCS, MV_1GBX_CTRL,
@@ -248,6 +175,24 @@ static bool mv2222_is_1gbx_capable(struct phy_device *phydev)
                                 priv->supported);
 }
 
+static bool mv2222_is_sgmii_capable(struct phy_device *phydev)
+{
+       struct mv2222_data *priv = phydev->priv;
+
+       return (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+                                 priv->supported) ||
+               linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+                                 priv->supported) ||
+               linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+                                 priv->supported) ||
+               linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
+                                 priv->supported) ||
+               linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
+                                 priv->supported) ||
+               linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT,
+                                 priv->supported));
+}
+
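
mv2222_is_sgmii_capable() tests six copper link modes one by one; a tighter equivalent could collect them in a mask and test once with linkmode_intersects(). Shown below as a sketch only, with illustrative names, and not part of the patch above:

    static bool demo_is_sgmii_capable(struct mv2222_data *priv)
    {
            __ETHTOOL_DECLARE_LINK_MODE_MASK(sgmii) = { 0, };

            /* Collect every copper speed the SGMII side can carry */
            linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, sgmii);
            linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, sgmii);
            linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, sgmii);
            linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, sgmii);
            linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, sgmii);
            linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, sgmii);

            return linkmode_intersects(priv->supported, sgmii);
    }
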
 static int mv2222_config_line(struct phy_device *phydev)
 {
        struct mv2222_data *priv = phydev->priv;
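For reference, the OR-chain in mv2222_is_sgmii_capable() above could equally be table-driven; a minimal sketch under the same linkmode API (the array and helper names are illustrative, not part of this patch):

	static const int mv2222_sgmii_modes[] = {	/* illustrative table */
		ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
		ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
		ETHTOOL_LINK_MODE_100baseT_Full_BIT,
		ETHTOOL_LINK_MODE_100baseT_Half_BIT,
		ETHTOOL_LINK_MODE_10baseT_Full_BIT,
		ETHTOOL_LINK_MODE_10baseT_Half_BIT,
	};

	static bool mv2222_supports_any_sgmii_mode(const unsigned long *supported)
	{
		size_t i;

		for (i = 0; i < ARRAY_SIZE(mv2222_sgmii_modes); i++)
			if (linkmode_test_bit(mv2222_sgmii_modes[i], supported))
				return true;

		return false;
	}

Either form is equivalent; the explicit chain mirrors the existing mv2222_is_1gbx_capable() style.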
@@ -267,7 +212,8 @@ static int mv2222_config_line(struct phy_device *phydev)
        }
 }
 
-static int mv2222_setup_forced(struct phy_device *phydev)
+/* Switch between 1G (1000BASE-X/SGMII) and 10G (10GBASE-R) modes */
+static int mv2222_swap_line_type(struct phy_device *phydev)
 {
        struct mv2222_data *priv = phydev->priv;
        bool changed = false;
@@ -275,25 +221,23 @@ static int mv2222_setup_forced(struct phy_device *phydev)
 
        switch (priv->line_interface) {
        case PHY_INTERFACE_MODE_10GBASER:
-               if (phydev->speed == SPEED_1000 &&
-                   mv2222_is_1gbx_capable(phydev)) {
+               if (mv2222_is_1gbx_capable(phydev)) {
                        priv->line_interface = PHY_INTERFACE_MODE_1000BASEX;
                        changed = true;
                }
 
-               break;
-       case PHY_INTERFACE_MODE_1000BASEX:
-               if (phydev->speed == SPEED_10000 &&
-                   mv2222_is_10g_capable(phydev)) {
-                       priv->line_interface = PHY_INTERFACE_MODE_10GBASER;
+               if (mv2222_is_sgmii_capable(phydev)) {
+                       priv->line_interface = PHY_INTERFACE_MODE_SGMII;
                        changed = true;
                }
 
                break;
+       case PHY_INTERFACE_MODE_1000BASEX:
        case PHY_INTERFACE_MODE_SGMII:
-               ret = mv2222_set_sgmii_speed(phydev);
-               if (ret < 0)
-                       return ret;
+               if (mv2222_is_10g_capable(phydev)) {
+                       priv->line_interface = PHY_INTERFACE_MODE_10GBASER;
+                       changed = true;
+               }
 
                break;
        default:
@@ -306,6 +250,29 @@ static int mv2222_setup_forced(struct phy_device *phydev)
                        return ret;
        }
 
+       return 0;
+}
+
+static int mv2222_setup_forced(struct phy_device *phydev)
+{
+       struct mv2222_data *priv = phydev->priv;
+       int ret;
+
+       if (priv->line_interface == PHY_INTERFACE_MODE_10GBASER) {
+               if (phydev->speed < SPEED_10000 &&
+                   phydev->speed != SPEED_UNKNOWN) {
+                       ret = mv2222_swap_line_type(phydev);
+                       if (ret < 0)
+                               return ret;
+               }
+       }
+
+       if (priv->line_interface == PHY_INTERFACE_MODE_SGMII) {
+               ret = mv2222_set_sgmii_speed(phydev);
+               if (ret < 0)
+                       return ret;
+       }
+
        return mv2222_disable_aneg(phydev);
 }
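A note on the speed test in mv2222_setup_forced(): SPEED_UNKNOWN is defined as -1 in the ethtool UAPI, so a bare "phydev->speed < SPEED_10000" would also match an unknown speed. The second condition keeps the line side on 10GBASE-R until a real lower speed has actually been forced:

	/* SPEED_UNKNOWN == -1 would satisfy "< SPEED_10000" on its own;
	 * only downgrade the line type for a genuinely forced lower speed.
	 */
	if (phydev->speed < SPEED_10000 && phydev->speed != SPEED_UNKNOWN)
		ret = mv2222_swap_line_type(phydev);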
 
@@ -319,17 +286,9 @@ static int mv2222_config_aneg(struct phy_device *phydev)
                return 0;
 
        if (phydev->autoneg == AUTONEG_DISABLE ||
-           phydev->speed == SPEED_10000)
+           priv->line_interface == PHY_INTERFACE_MODE_10GBASER)
                return mv2222_setup_forced(phydev);
 
-       if (priv->line_interface == PHY_INTERFACE_MODE_10GBASER &&
-           mv2222_is_1gbx_capable(phydev)) {
-               priv->line_interface = PHY_INTERFACE_MODE_1000BASEX;
-               ret = mv2222_config_line(phydev);
-               if (ret < 0)
-                       return ret;
-       }
-
        adv = linkmode_adv_to_mii_adv_x(priv->supported,
                                        ETHTOOL_LINK_MODE_1000baseX_Full_BIT);
 
@@ -363,6 +322,135 @@ static int mv2222_aneg_done(struct phy_device *phydev)
        return (ret & BMSR_ANEGCOMPLETE);
 }
 
+/* Returns negative on error, 0 if link is down, 1 if link is up */
+static int mv2222_read_status_10g(struct phy_device *phydev)
+{
+       static int timeout;
+       int val, link = 0;
+
+       val = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_STAT1);
+       if (val < 0)
+               return val;
+
+       if (val & MDIO_STAT1_LSTATUS) {
+               link = 1;
+
+               /* 10GBASE-R does not support auto-negotiation */
+               phydev->autoneg = AUTONEG_DISABLE;
+               phydev->speed = SPEED_10000;
+               phydev->duplex = DUPLEX_FULL;
+       } else {
+               if (phydev->autoneg == AUTONEG_ENABLE) {
+                       timeout++;
+
+                       if (timeout > AUTONEG_TIMEOUT) {
+                               timeout = 0;
+
+                               val = mv2222_swap_line_type(phydev);
+                               if (val < 0)
+                                       return val;
+
+                               return mv2222_config_aneg(phydev);
+                       }
+               }
+       }
+
+       return link;
+}
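One caveat worth flagging: "timeout" here (and in mv2222_read_status_1g() below) is a function-local static, so the counter is shared by every 88x2222 instance in the system. With a single PHY that is harmless; a per-device variant would hang the counter off mv2222_data instead. A hedged sketch, where the an_timeout field is hypothetical and not part of this patch:

	struct mv2222_data *priv = phydev->priv;

	if (++priv->an_timeout > AUTONEG_TIMEOUT) {	/* an_timeout: assumed new field in mv2222_data */
		priv->an_timeout = 0;

		val = mv2222_swap_line_type(phydev);
		if (val < 0)
			return val;

		return mv2222_config_aneg(phydev);
	}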
+
+/* Returns negative on error, 0 if link is down, 1 if link is up */
+static int mv2222_read_status_1g(struct phy_device *phydev)
+{
+       static int timeout;
+       int val, link = 0;
+
+       val = phy_read_mmd(phydev, MDIO_MMD_PCS, MV_1GBX_STAT);
+       if (val < 0)
+               return val;
+
+       if (phydev->autoneg == AUTONEG_ENABLE &&
+           !(val & BMSR_ANEGCOMPLETE)) {
+               timeout++;
+
+               if (timeout > AUTONEG_TIMEOUT) {
+                       timeout = 0;
+
+                       val = mv2222_swap_line_type(phydev);
+                       if (val < 0)
+                               return val;
+
+                       return mv2222_config_aneg(phydev);
+               }
+
+               return 0;
+       }
+
+       if (!(val & BMSR_LSTATUS))
+               return 0;
+
+       link = 1;
+
+       val = phy_read_mmd(phydev, MDIO_MMD_PCS, MV_1GBX_PHY_STAT);
+       if (val < 0)
+               return val;
+
+       if (val & MV_1GBX_PHY_STAT_AN_RESOLVED) {
+               if (val & MV_1GBX_PHY_STAT_DUPLEX)
+                       phydev->duplex = DUPLEX_FULL;
+               else
+                       phydev->duplex = DUPLEX_HALF;
+
+               if (val & MV_1GBX_PHY_STAT_SPEED1000)
+                       phydev->speed = SPEED_1000;
+               else if (val & MV_1GBX_PHY_STAT_SPEED100)
+                       phydev->speed = SPEED_100;
+               else
+                       phydev->speed = SPEED_10;
+       }
+
+       return link;
+}
+
+static bool mv2222_link_is_operational(struct phy_device *phydev)
+{
+       struct mv2222_data *priv = phydev->priv;
+       int val;
+
+       val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MV_RX_SIGNAL_DETECT);
+       if (val < 0 || !(val & MV_RX_SIGNAL_DETECT_GLOBAL))
+               return false;
+
+       if (phydev->sfp_bus && !priv->sfp_link)
+               return false;
+
+       return true;
+}
+
+static int mv2222_read_status(struct phy_device *phydev)
+{
+       struct mv2222_data *priv = phydev->priv;
+       int link;
+
+       phydev->link = 0;
+       phydev->speed = SPEED_UNKNOWN;
+       phydev->duplex = DUPLEX_UNKNOWN;
+
+       if (!mv2222_link_is_operational(phydev))
+               return 0;
+
+       if (priv->line_interface == PHY_INTERFACE_MODE_10GBASER)
+               link = mv2222_read_status_10g(phydev);
+       else
+               link = mv2222_read_status_1g(phydev);
+
+       if (link < 0)
+               return link;
+
+       phydev->link = link;
+
+       return 0;
+}
+
 static int mv2222_resume(struct phy_device *phydev)
 {
        return mv2222_tx_enable(phydev);
@@ -424,11 +512,7 @@ static int mv2222_sfp_insert(void *upstream, const struct sfp_eeprom_id *id)
                return ret;
 
        if (mutex_trylock(&phydev->lock)) {
-               if (priv->line_interface == PHY_INTERFACE_MODE_10GBASER)
-                       ret = mv2222_setup_forced(phydev);
-               else
-                       ret = mv2222_config_aneg(phydev);
-
+               ret = mv2222_config_aneg(phydev);
                mutex_unlock(&phydev->lock);
        }
 
@@ -446,9 +530,29 @@ static void mv2222_sfp_remove(void *upstream)
        linkmode_zero(priv->supported);
 }
 
+static void mv2222_sfp_link_up(void *upstream)
+{
+       struct phy_device *phydev = upstream;
+       struct mv2222_data *priv;
+
+       priv = phydev->priv;
+       priv->sfp_link = true;
+}
+
+static void mv2222_sfp_link_down(void *upstream)
+{
+       struct phy_device *phydev = upstream;
+       struct mv2222_data *priv;
+
+       priv = phydev->priv;
+       priv->sfp_link = false;
+}
+
 static const struct sfp_upstream_ops sfp_phy_ops = {
        .module_insert = mv2222_sfp_insert,
        .module_remove = mv2222_sfp_remove,
+       .link_up = mv2222_sfp_link_up,
+       .link_down = mv2222_sfp_link_down,
        .attach = phy_sfp_attach,
        .detach = phy_sfp_detach,
 };
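For context, these upstream ops are the glue that makes the new sfp_link flag work: the SFP core invokes .link_up/.link_down as the module gains or loses signal, and mv2222_link_is_operational() reads the cached state. Such ops are typically registered from the driver's probe via phy_sfp_probe(); a minimal sketch of that wiring (error handling trimmed, may differ from the driver's actual probe):

	static int mv2222_probe(struct phy_device *phydev)
	{
		struct mv2222_data *priv;

		priv = devm_kzalloc(&phydev->mdio.dev, sizeof(*priv), GFP_KERNEL);
		if (!priv)
			return -ENOMEM;

		phydev->priv = priv;

		/* attach the SFP cage so module_insert/link_up callbacks fire */
		return phy_sfp_probe(phydev, &sfp_phy_ops);
	}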
index 429a710..9cf1da2 100644 (file)
@@ -152,8 +152,7 @@ mlx5_eswitch_vport_match_metadata_enabled(const struct mlx5_eswitch *esw)
 };
 
 static inline u32
-mlx5_eswitch_get_vport_metadata_for_match(struct mlx5_eswitch *esw,
-                                         int vport_num)
+mlx5_eswitch_get_vport_metadata_for_match(struct mlx5_eswitch *esw, u16 vport_num)
 {
        return 0;
 };
index e338ef7..97edb31 100644 (file)
@@ -238,6 +238,8 @@ struct plat_stmmacenet_data {
        struct pci_dev *pdev;
        bool has_crossts;
        int int_snapshot_num;
+       int ext_snapshot_num;
+       bool ext_snapshot_en;
        bool multi_msi_en;
        int msi_mac_vec;
        int msi_wol_vec;
index 9d26544..4daa95c 100644 (file)
@@ -1593,7 +1593,8 @@ out:
        spin_unlock(&br->multicast_lock);
 }
 
-static void br_mc_disabled_update(struct net_device *dev, bool value)
+static int br_mc_disabled_update(struct net_device *dev, bool value,
+                                struct netlink_ext_ack *extack)
 {
        struct switchdev_attr attr = {
                .orig_dev = dev,
@@ -1602,11 +1603,13 @@ static void br_mc_disabled_update(struct net_device *dev, bool value)
                .u.mc_disabled = !value,
        };
 
-       switchdev_port_attr_set(dev, &attr, NULL);
+       return switchdev_port_attr_set(dev, &attr, extack);
 }
 
 int br_multicast_add_port(struct net_bridge_port *port)
 {
+       int err;
+
        port->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
        port->multicast_eht_hosts_limit = BR_MCAST_DEFAULT_EHT_HOSTS_LIMIT;
 
@@ -1618,8 +1621,12 @@ int br_multicast_add_port(struct net_bridge_port *port)
        timer_setup(&port->ip6_own_query.timer,
                    br_ip6_multicast_port_query_expired, 0);
 #endif
-       br_mc_disabled_update(port->dev,
-                             br_opt_get(port->br, BROPT_MULTICAST_ENABLED));
+       err = br_mc_disabled_update(port->dev,
+                                   br_opt_get(port->br,
+                                              BROPT_MULTICAST_ENABLED),
+                                   NULL);
+       if (err)
+               return err;
 
        port->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats);
        if (!port->mcast_stats)
@@ -3560,16 +3567,23 @@ static void br_multicast_start_querier(struct net_bridge *br,
        rcu_read_unlock();
 }
 
-int br_multicast_toggle(struct net_bridge *br, unsigned long val)
+int br_multicast_toggle(struct net_bridge *br, unsigned long val,
+                       struct netlink_ext_ack *extack)
 {
        struct net_bridge_port *port;
        bool change_snoopers = false;
+       int err = 0;
 
        spin_lock_bh(&br->multicast_lock);
        if (!!br_opt_get(br, BROPT_MULTICAST_ENABLED) == !!val)
                goto unlock;
 
-       br_mc_disabled_update(br->dev, val);
+       err = br_mc_disabled_update(br->dev, val, extack);
+       if (err == -EOPNOTSUPP)
+               err = 0;
+       if (err)
+               goto unlock;
+
        br_opt_toggle(br, BROPT_MULTICAST_ENABLED, !!val);
        if (!br_opt_get(br, BROPT_MULTICAST_ENABLED)) {
                change_snoopers = true;
@@ -3607,7 +3621,7 @@ unlock:
                        br_multicast_leave_snoopers(br);
        }
 
-       return 0;
+       return err;
 }
 
 bool br_multicast_enabled(const struct net_device *dev)
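The -EOPNOTSUPP handling above follows the usual switchdev convention: that error means no offloading driver is listening, in which case the software data path alone is sufficient and the toggle must still succeed. Isolated, the pattern is:

	err = switchdev_port_attr_set(dev, &attr, extack);
	if (err == -EOPNOTSUPP)
		err = 0;	/* no offload present: software bridging still works */
	if (err)
		return err;	/* a real hardware error vetoes the change */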
index f2b1343..0456593 100644 (file)
@@ -1293,7 +1293,9 @@ static int br_changelink(struct net_device *brdev, struct nlattr *tb[],
        if (data[IFLA_BR_MCAST_SNOOPING]) {
                u8 mcast_snooping = nla_get_u8(data[IFLA_BR_MCAST_SNOOPING]);
 
-               br_multicast_toggle(br, mcast_snooping);
+               err = br_multicast_toggle(br, mcast_snooping, extack);
+               if (err)
+                       return err;
        }
 
        if (data[IFLA_BR_MCAST_QUERY_USE_IFADDR]) {
index 5074799..7ce8a77 100644 (file)
@@ -810,7 +810,8 @@ void br_multicast_flood(struct net_bridge_mdb_entry *mdst,
                        struct sk_buff *skb, bool local_rcv, bool local_orig);
 int br_multicast_set_router(struct net_bridge *br, unsigned long val);
 int br_multicast_set_port_router(struct net_bridge_port *p, unsigned long val);
-int br_multicast_toggle(struct net_bridge *br, unsigned long val);
+int br_multicast_toggle(struct net_bridge *br, unsigned long val,
+                       struct netlink_ext_ack *extack);
 int br_multicast_set_querier(struct net_bridge *br, unsigned long val);
 int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val);
 int br_multicast_set_igmp_version(struct net_bridge *br, unsigned long val);
index 072e298..381467b 100644 (file)
@@ -409,17 +409,11 @@ static ssize_t multicast_snooping_show(struct device *d,
        return sprintf(buf, "%d\n", br_opt_get(br, BROPT_MULTICAST_ENABLED));
 }
 
-static int toggle_multicast(struct net_bridge *br, unsigned long val,
-                           struct netlink_ext_ack *extack)
-{
-       return br_multicast_toggle(br, val);
-}
-
 static ssize_t multicast_snooping_store(struct device *d,
                                        struct device_attribute *attr,
                                        const char *buf, size_t len)
 {
-       return store_bridge_parm(d, buf, len, toggle_multicast);
+       return store_bridge_parm(d, buf, len, br_multicast_toggle);
 }
 static DEVICE_ATTR_RW(multicast_snooping);
 
index 3ad9e84..14010c0 100644 (file)
@@ -3773,13 +3773,13 @@ struct sk_buff *skb_segment_list(struct sk_buff *skb,
        unsigned int tnl_hlen = skb_tnl_header_len(skb);
        unsigned int delta_truesize = 0;
        unsigned int delta_len = 0;
+       struct sk_buff *tail = NULL;
        struct sk_buff *nskb, *tmp;
        int err;
 
        skb_push(skb, -skb_network_offset(skb) + offset);
 
        skb_shinfo(skb)->frag_list = NULL;
-       skb->next = list_skb;
 
        do {
                nskb = list_skb;
@@ -3797,8 +3797,17 @@ struct sk_buff *skb_segment_list(struct sk_buff *skb,
                        }
                }
 
-               if (unlikely(err))
+               if (!tail)
+                       skb->next = nskb;
+               else
+                       tail->next = nskb;
+
+               if (unlikely(err)) {
+                       nskb->next = list_skb;
                        goto err_linearize;
+               }
+
+               tail = nskb;
 
                delta_len += nskb->len;
                delta_truesize += nskb->truesize;
@@ -3825,7 +3834,7 @@ struct sk_buff *skb_segment_list(struct sk_buff *skb,
 
        skb_gso_reset(skb);
 
-       skb->prev = nskb;
+       skb->prev = tail;
 
        if (skb_needs_linearize(skb, features) &&
            __skb_linearize(skb))
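The skb_segment_list() fix above changes the list construction from "link first, validate later" to "validate, then link": tail tracks the last segment already accepted into skb's chain, and on a clone/unshare failure the untouched remainder is re-attached via nskb->next so that err_linearize walks (and frees) a consistent list. Reduced to the stitching step:

	if (!tail)
		skb->next = nskb;	/* first validated segment heads the chain */
	else
		tail->next = nskb;	/* append after the last good segment */

	if (unlikely(err)) {
		nskb->next = list_skb;	/* re-attach the unprocessed tail for cleanup */
		goto err_linearize;
	}

	tail = nskb;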
index dd1c752..35803ab 100644 (file)
@@ -754,7 +754,7 @@ int esp_input_done2(struct sk_buff *skb, int err)
        int hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
        int ihl;
 
-       if (!xo || (xo && !(xo->flags & CRYPTO_DONE)))
+       if (!xo || !(xo->flags & CRYPTO_DONE))
                kfree(ESP_SKB_CB(skb)->tmp);
 
        if (unlikely(err))
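The esp4 cleanup above is a pure short-circuit identity: in "!xo || (xo && q)" the right-hand side is only evaluated when !xo is false, i.e. when xo is already known non-NULL, so the inner "xo &&" is dead code. In general, !p || (p && q) reduces to !p || q:

	/* equivalent by short-circuit evaluation: if !xo is false, xo is non-NULL */
	if (!xo || !(xo->flags & CRYPTO_DONE))
		kfree(ESP_SKB_CB(skb)->tmp);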
index 080ee7f..20d492d 100644 (file)
@@ -705,7 +705,7 @@ static int ah6_init_state(struct xfrm_state *x)
 
        if (aalg_desc->uinfo.auth.icv_fullbits/8 !=
            crypto_ahash_digestsize(ahash)) {
-               pr_info("AH: %s digestsize %u != %hu\n",
+               pr_info("AH: %s digestsize %u != %u\n",
                        x->aalg->alg_name, crypto_ahash_digestsize(ahash),
                        aalg_desc->uinfo.auth.icv_fullbits/8);
                goto error;
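The %hu -> %u change here (and in the esp6 hunk below) matches C's integer promotions: icv_fullbits is a 16-bit field, but dividing it by 8 promotes the expression to int, so %u is the type-correct specifier and %hu trips -Wformat. Schematically:

	u16 bits = aalg_desc->uinfo.auth.icv_fullbits;

	pr_info("digestsize mismatch: %u\n", bits / 8);	/* bits / 8 has type int after promotion */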
index 727d791..393ae2b 100644 (file)
@@ -1147,7 +1147,7 @@ static int esp_init_authenc(struct xfrm_state *x)
                err = -EINVAL;
                if (aalg_desc->uinfo.auth.icv_fullbits / 8 !=
                    crypto_aead_authsize(aead)) {
-                       pr_info("ESP: %s digestsize %u != %hu\n",
+                       pr_info("ESP: %s digestsize %u != %u\n",
                                x->aalg->alg_name,
                                crypto_aead_authsize(aead),
                                aalg_desc->uinfo.auth.icv_fullbits / 8);
index 4af56af..40ed4fc 100644 (file)
@@ -318,7 +318,7 @@ static int esp6_xmit(struct xfrm_state *x, struct sk_buff *skb,  netdev_features
        esp.plen = esp.clen - skb->len - esp.tfclen;
        esp.tailen = esp.tfclen + esp.plen + alen;
 
-       if (!hw_offload || (hw_offload && !skb_is_gso(skb))) {
+       if (!hw_offload || !skb_is_gso(skb)) {
                esp.nfrags = esp6_output_head(x, skb, &esp);
                if (esp.nfrags < 0)
                        return esp.nfrags;
index 118d585..ba96db1 100644 (file)
@@ -1359,7 +1359,7 @@ static unsigned int fanout_demux_rollover(struct packet_fanout *f,
        struct packet_sock *po, *po_next, *po_skip = NULL;
        unsigned int i, j, room = ROOM_NONE;
 
-       po = pkt_sk(f->arr[idx]);
+       po = pkt_sk(rcu_dereference(f->arr[idx]));
 
        if (try_self) {
                room = packet_rcv_has_room(po, skb);
@@ -1371,7 +1371,7 @@ static unsigned int fanout_demux_rollover(struct packet_fanout *f,
 
        i = j = min_t(int, po->rollover->sock, num - 1);
        do {
-               po_next = pkt_sk(f->arr[i]);
+               po_next = pkt_sk(rcu_dereference(f->arr[i]));
                if (po_next != po_skip && !READ_ONCE(po_next->pressure) &&
                    packet_rcv_has_room(po_next, skb) == ROOM_NORMAL) {
                        if (i != j)
@@ -1466,7 +1466,7 @@ static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
        if (fanout_has_flag(f, PACKET_FANOUT_FLAG_ROLLOVER))
                idx = fanout_demux_rollover(f, skb, idx, true, num);
 
-       po = pkt_sk(f->arr[idx]);
+       po = pkt_sk(rcu_dereference(f->arr[idx]));
        return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev);
 }
 
@@ -1480,7 +1480,7 @@ static void __fanout_link(struct sock *sk, struct packet_sock *po)
        struct packet_fanout *f = po->fanout;
 
        spin_lock(&f->lock);
-       f->arr[f->num_members] = sk;
+       rcu_assign_pointer(f->arr[f->num_members], sk);
        smp_wmb();
        f->num_members++;
        if (f->num_members == 1)
@@ -1495,11 +1495,14 @@ static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
 
        spin_lock(&f->lock);
        for (i = 0; i < f->num_members; i++) {
-               if (f->arr[i] == sk)
+               if (rcu_dereference_protected(f->arr[i],
+                                             lockdep_is_held(&f->lock)) == sk)
                        break;
        }
        BUG_ON(i >= f->num_members);
-       f->arr[i] = f->arr[f->num_members - 1];
+       rcu_assign_pointer(f->arr[i],
+                          rcu_dereference_protected(f->arr[f->num_members - 1],
+                                                    lockdep_is_held(&f->lock)));
        f->num_members--;
        if (f->num_members == 0)
                __dev_remove_pack(&f->prot_hook);
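With arr[] now annotated __rcu (see the internal.h hunk below), the access pattern splits cleanly: lock-free readers in the fanout demux path use rcu_dereference() inside their existing RCU read side, while writers, which mutate the array under f->lock, pair rcu_assign_pointer() with rcu_dereference_protected(..., lockdep_is_held(&f->lock)) to document the locking to sparse and lockdep. The reader half, schematically:

	rcu_read_lock();
	po = pkt_sk(rcu_dereference(f->arr[idx]));	/* paired with rcu_assign_pointer() in __fanout_link() */
	/* ... deliver skb via po->prot_hook ... */
	rcu_read_unlock();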
index 5f61e59..48af35b 100644 (file)
@@ -94,7 +94,7 @@ struct packet_fanout {
        spinlock_t              lock;
        refcount_t              sk_ref;
        struct packet_type      prot_hook ____cacheline_aligned_in_smp;
-       struct sock             *arr[];
+       struct sock     __rcu   *arr[];
 };
 
 struct packet_rollover {