Merge tag 'mac80211-next-for-net-next-2021-06-25' of git://git.kernel.org/pub/scm...
author David S. Miller <davem@davemloft.net>
Mon, 28 Jun 2021 20:06:12 +0000 (13:06 -0700)
committer David S. Miller <davem@davemloft.net>
Mon, 28 Jun 2021 20:06:12 +0000 (13:06 -0700)
Johannes Berg says:

====================
Lots of changes:
 * aggregation handling improvements for some drivers
 * hidden AP discovery on 6 GHz and other HE 6 GHz
   improvements
 * minstrel improvements for no-ack frames
 * deferred rate control for TXQs to improve reaction
   times
 * virtual time-based airtime scheduler
 * along with various little cleanups/fixups
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
276 files changed:
Documentation/devicetree/bindings/net/microchip,sparx5-switch.yaml [new file with mode: 0644]
Documentation/networking/device_drivers/ethernet/google/gve.rst
Documentation/networking/dsa/sja1105.rst
Documentation/networking/ip-sysctl.rst
Documentation/networking/nf_conntrack-sysctl.rst
arch/alpha/include/uapi/asm/socket.h
arch/arm64/boot/dts/microchip/sparx5.dtsi
arch/arm64/boot/dts/microchip/sparx5_pcb134_board.dtsi
arch/arm64/boot/dts/microchip/sparx5_pcb135_board.dtsi
arch/mips/include/uapi/asm/socket.h
arch/parisc/include/uapi/asm/socket.h
arch/sparc/include/uapi/asm/socket.h
drivers/net/bareudp.c
drivers/net/dsa/sja1105/Kconfig
drivers/net/ethernet/broadcom/genet/bcmgenet.c
drivers/net/ethernet/freescale/Kconfig
drivers/net/ethernet/freescale/xgmac_mdio.c
drivers/net/ethernet/google/Kconfig
drivers/net/ethernet/google/gve/Makefile
drivers/net/ethernet/google/gve/gve.h
drivers/net/ethernet/google/gve/gve_adminq.c
drivers/net/ethernet/google/gve/gve_adminq.h
drivers/net/ethernet/google/gve/gve_desc_dqo.h [new file with mode: 0644]
drivers/net/ethernet/google/gve/gve_dqo.h [new file with mode: 0644]
drivers/net/ethernet/google/gve/gve_ethtool.c
drivers/net/ethernet/google/gve/gve_main.c
drivers/net/ethernet/google/gve/gve_rx.c
drivers/net/ethernet/google/gve/gve_rx_dqo.c [new file with mode: 0644]
drivers/net/ethernet/google/gve/gve_tx.c
drivers/net/ethernet/google/gve/gve_tx_dqo.c [new file with mode: 0644]
drivers/net/ethernet/google/gve/gve_utils.c [new file with mode: 0644]
drivers/net/ethernet/google/gve/gve_utils.h [new file with mode: 0644]
drivers/net/ethernet/ibm/ibmveth.c
drivers/net/ethernet/intel/ice/ice_dcb_lib.c
drivers/net/ethernet/intel/ice/ice_hw_autogen.h
drivers/net/ethernet/intel/ice/ice_main.c
drivers/net/ethernet/intel/ice/ice_ptp.c
drivers/net/ethernet/intel/ice/ice_ptp.h
drivers/net/ethernet/intel/ice/ice_sched.c
drivers/net/ethernet/intel/ice/ice_trace.h [new file with mode: 0644]
drivers/net/ethernet/intel/ice/ice_txrx.c
drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
drivers/net/ethernet/marvell/mvmdio.c
drivers/net/ethernet/mellanox/Kconfig
drivers/net/ethernet/mellanox/Makefile
drivers/net/ethernet/mellanox/mlx5/core/Kconfig
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/eq.c
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
drivers/net/ethernet/mellanox/mlxbf_gige/Kconfig [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlxbf_gige/Makefile [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_ethtool.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_gpio.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_intr.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_tx.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlxsw/core_env.c
drivers/net/ethernet/microchip/Kconfig
drivers/net/ethernet/microchip/Makefile
drivers/net/ethernet/microchip/sparx5/Kconfig [new file with mode: 0644]
drivers/net/ethernet/microchip/sparx5/Makefile [new file with mode: 0644]
drivers/net/ethernet/microchip/sparx5/sparx5_calendar.c [new file with mode: 0644]
drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c [new file with mode: 0644]
drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c [new file with mode: 0644]
drivers/net/ethernet/microchip/sparx5/sparx5_main.c [new file with mode: 0644]
drivers/net/ethernet/microchip/sparx5/sparx5_main.h [new file with mode: 0644]
drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h [new file with mode: 0644]
drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c [new file with mode: 0644]
drivers/net/ethernet/microchip/sparx5/sparx5_packet.c [new file with mode: 0644]
drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c [new file with mode: 0644]
drivers/net/ethernet/microchip/sparx5/sparx5_port.c [new file with mode: 0644]
drivers/net/ethernet/microchip/sparx5/sparx5_port.h [new file with mode: 0644]
drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c [new file with mode: 0644]
drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c [new file with mode: 0644]
drivers/net/gtp.c
drivers/net/mdio/fwnode_mdio.c
drivers/net/usb/usbnet.c
drivers/net/virtio_net.c
drivers/net/wireless/ath/ath10k/wmi.c
drivers/net/wireless/ath/ath11k/mac.c
drivers/net/wireless/ath/ath11k/pci.c
drivers/net/wireless/ath/wcn36xx/smd.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.h
drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c
drivers/net/wireless/intel/iwlwifi/Makefile
drivers/net/wireless/intel/iwlwifi/cfg/22000.c
drivers/net/wireless/intel/iwlwifi/cfg/9000.c
drivers/net/wireless/intel/iwlwifi/fw/acpi.c
drivers/net/wireless/intel/iwlwifi/fw/acpi.h
drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
drivers/net/wireless/intel/iwlwifi/fw/api/d3.h
drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
drivers/net/wireless/intel/iwlwifi/fw/dbg.h
drivers/net/wireless/intel/iwlwifi/fw/dump.c [new file with mode: 0644]
drivers/net/wireless/intel/iwlwifi/fw/file.h
drivers/net/wireless/intel/iwlwifi/fw/pnvm.c
drivers/net/wireless/intel/iwlwifi/fw/pnvm.h
drivers/net/wireless/intel/iwlwifi/fw/uefi.c [new file with mode: 0644]
drivers/net/wireless/intel/iwlwifi/fw/uefi.h [new file with mode: 0644]
drivers/net/wireless/intel/iwlwifi/iwl-config.h
drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h
drivers/net/wireless/intel/iwlwifi/iwl-csr.h
drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
drivers/net/wireless/intel/iwlwifi/iwl-drv.c
drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
drivers/net/wireless/intel/iwlwifi/iwl-prph.h
drivers/net/wireless/intel/iwlwifi/iwl-trans.h
drivers/net/wireless/intel/iwlwifi/mvm/d3.c
drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
drivers/net/wireless/intel/iwlwifi/mvm/fw.c
drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
drivers/net/wireless/intel/iwlwifi/mvm/offloading.c
drivers/net/wireless/intel/iwlwifi/mvm/ops.c
drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c
drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
drivers/net/wireless/intel/iwlwifi/mvm/scan.c
drivers/net/wireless/intel/iwlwifi/mvm/sta.c
drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
drivers/net/wireless/intel/iwlwifi/mvm/utils.c
drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
drivers/net/wireless/intel/iwlwifi/pcie/drv.c
drivers/net/wireless/intel/iwlwifi/pcie/internal.h
drivers/net/wireless/intel/iwlwifi/pcie/rx.c
drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
drivers/net/wireless/intel/iwlwifi/pcie/trans.c
drivers/net/wireless/intersil/orinoco/hw.c
drivers/net/wireless/intersil/orinoco/hw.h
drivers/net/wireless/intersil/orinoco/wext.c
drivers/net/wireless/marvell/mwifiex/fw.h
drivers/net/wireless/marvell/mwifiex/sta_cmd.c
drivers/net/wireless/marvell/mwl8k.c
drivers/net/wireless/mediatek/mt76/dma.c
drivers/net/wireless/mediatek/mt76/mac80211.c
drivers/net/wireless/mediatek/mt76/mt76.h
drivers/net/wireless/mediatek/mt76/mt7603/init.c
drivers/net/wireless/mediatek/mt76/mt7603/mac.c
drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h
drivers/net/wireless/mediatek/mt76/mt7603/regs.h
drivers/net/wireless/mediatek/mt76/mt7615/Makefile
drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c
drivers/net/wireless/mediatek/mt76/mt7615/dma.c
drivers/net/wireless/mediatek/mt76/mt7615/init.c
drivers/net/wireless/mediatek/mt76/mt7615/mac.c
drivers/net/wireless/mediatek/mt76/mt7615/mac.h
drivers/net/wireless/mediatek/mt76/mt7615/main.c
drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
drivers/net/wireless/mediatek/mt76/mt7615/mmio.c
drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c
drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c
drivers/net/wireless/mediatek/mt76/mt7615/regs.h
drivers/net/wireless/mediatek/mt76/mt7615/sdio.h
drivers/net/wireless/mediatek/mt76/mt7615/sdio_mcu.c
drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c
drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c
drivers/net/wireless/mediatek/mt76/mt76_connac.h
drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
drivers/net/wireless/mediatek/mt76/mt76x02_mac.h
drivers/net/wireless/mediatek/mt76/mt76x02_regs.h
drivers/net/wireless/mediatek/mt76/mt76x02_util.c
drivers/net/wireless/mediatek/mt76/mt7915/Makefile
drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
drivers/net/wireless/mediatek/mt76/mt7915/dma.c
drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c
drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h
drivers/net/wireless/mediatek/mt76/mt7915/init.c
drivers/net/wireless/mediatek/mt76/mt7915/mac.c
drivers/net/wireless/mediatek/mt76/mt7915/mac.h
drivers/net/wireless/mediatek/mt76/mt7915/main.c
drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
drivers/net/wireless/mediatek/mt76/mt7915/pci.c
drivers/net/wireless/mediatek/mt76/mt7915/regs.h
drivers/net/wireless/mediatek/mt76/mt7915/testmode.c
drivers/net/wireless/mediatek/mt76/mt7915/testmode.h
drivers/net/wireless/mediatek/mt76/mt7921/Makefile
drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c
drivers/net/wireless/mediatek/mt76/mt7921/dma.c
drivers/net/wireless/mediatek/mt76/mt7921/init.c
drivers/net/wireless/mediatek/mt76/mt7921/mac.c
drivers/net/wireless/mediatek/mt76/mt7921/mac.h
drivers/net/wireless/mediatek/mt76/mt7921/main.c
drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
drivers/net/wireless/mediatek/mt76/mt7921/mcu.h
drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
drivers/net/wireless/mediatek/mt76/mt7921/pci.c
drivers/net/wireless/mediatek/mt76/sdio.c
drivers/net/wireless/mediatek/mt76/testmode.c
drivers/net/wireless/mediatek/mt76/tx.c
drivers/net/wireless/mediatek/mt76/usb.c
drivers/net/wireless/mediatek/mt7601u/usb.c
drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
drivers/net/wireless/realtek/rtw88/coex.c
drivers/net/wireless/realtek/rtw88/debug.c
drivers/net/wireless/realtek/rtw88/debug.h
drivers/net/wireless/realtek/rtw88/fw.c
drivers/net/wireless/realtek/rtw88/fw.h
drivers/net/wireless/realtek/rtw88/mac80211.c
drivers/net/wireless/realtek/rtw88/main.c
drivers/net/wireless/realtek/rtw88/main.h
drivers/net/wireless/realtek/rtw88/pci.c
drivers/net/wireless/realtek/rtw88/phy.c
drivers/net/wireless/realtek/rtw88/phy.h
drivers/net/wireless/realtek/rtw88/ps.c
drivers/net/wireless/realtek/rtw88/rtw8822c.c
drivers/net/wireless/realtek/rtw88/rtw8822c_table.c
include/linux/fwnode_mdio.h
include/net/cfg80211.h
include/net/netfilter/nf_conntrack_l4proto.h
include/net/netfilter/nf_tables_core.h
include/net/sch_generic.h
include/net/sctp/structs.h
include/net/tls.h
include/net/xfrm.h
include/uapi/asm-generic/socket.h
include/uapi/linux/netfilter/nf_tables.h
include/uapi/linux/snmp.h
net/core/dev.c
net/core/devlink.c
net/core/sock.c
net/core/sock_reuseport.c
net/ipv4/inet_connection_sock.c
net/ipv4/ip_gre.c
net/ipv4/ip_output.c
net/ipv4/ipip.c
net/ipv4/proc.c
net/ipv4/tcp_minisocks.c
net/ipv6/ip6_output.c
net/ipv6/ip6_tunnel.c
net/ipv6/sit.c
net/mptcp/protocol.c
net/netfilter/Makefile
net/netfilter/nf_conntrack_proto.c
net/netfilter/nf_conntrack_proto_dccp.c
net/netfilter/nf_conntrack_proto_icmp.c
net/netfilter/nf_conntrack_proto_icmpv6.c
net/netfilter/nf_conntrack_proto_sctp.c
net/netfilter/nf_conntrack_proto_tcp.c
net/netfilter/nf_conntrack_proto_udp.c
net/netfilter/nf_tables_core.c
net/netfilter/nfnetlink_hook.c
net/netfilter/nft_exthdr.c
net/netfilter/nft_last.c [new file with mode: 0644]
net/sched/sch_generic.c
net/sctp/sm_statefuns.c
net/sctp/transport.c
net/smc/smc_tx.c
net/xfrm/xfrm_output.c
tools/testing/selftests/net/.gitignore
tools/testing/selftests/net/Makefile
tools/testing/selftests/net/config
tools/testing/selftests/net/icmp_redirect.sh
tools/testing/selftests/net/so_netns_cookie.c [new file with mode: 0644]

diff --git a/Documentation/devicetree/bindings/net/microchip,sparx5-switch.yaml b/Documentation/devicetree/bindings/net/microchip,sparx5-switch.yaml
new file mode 100644 (file)
index 0000000..347b912
--- /dev/null
@@ -0,0 +1,226 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/microchip,sparx5-switch.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Microchip Sparx5 Ethernet switch controller
+
+maintainers:
+  - Steen Hegelund <steen.hegelund@microchip.com>
+  - Lars Povlsen <lars.povlsen@microchip.com>
+
+description: |
+  The SparX-5 Enterprise Ethernet switch family provides a rich set of
+  enterprise switching features, such as advanced TCAM-based VLAN and
+  QoS processing enabling delivery of differentiated services, and
+  security through TCAM-based frame processing using the Versatile
+  Content-Aware Processor (VCAP).
+
+  IPv4/IPv6 Layer 3 (L3) unicast and multicast routing is supported
+  with up to 18K IPv4/9K IPv6 unicast LPM entries and up to 9K IPv4/3K
+  IPv6 (S,G) multicast groups.
+
+  L3 security features include source guard and reverse path
+  forwarding (uRPF) tasks. Additional L3 features include VRF-Lite and
+  IP tunnels (IP over GRE/IP).
+
+  The SparX-5 switch family targets managed Layer 2 and Layer 3
+  equipment in SMB, SME, and Enterprise where high port count
+  1G/2.5G/5G/10G switching with 10G/25G aggregation links is required.
+
+properties:
+  $nodename:
+    pattern: "^switch@[0-9a-f]+$"
+
+  compatible:
+    const: microchip,sparx5-switch
+
+  reg:
+    items:
+      - description: cpu target
+      - description: devices target
+      - description: general control block target
+
+  reg-names:
+    items:
+      - const: cpu
+      - const: devices
+      - const: gcb
+
+  interrupts:
+    minItems: 1
+    items:
+      - description: register based extraction
+      - description: frame dma based extraction
+
+  interrupt-names:
+    minItems: 1
+    items:
+      - const: xtr
+      - const: fdma
+
+  resets:
+    items:
+      - description: Reset controller used for switch core reset (soft reset)
+
+  reset-names:
+    items:
+      - const: switch
+
+  mac-address: true
+
+  ethernet-ports:
+    type: object
+    patternProperties:
+      "^port@[0-9a-f]+$":
+        type: object
+
+        properties:
+          '#address-cells':
+            const: 1
+          '#size-cells':
+            const: 0
+
+          reg:
+            description: Switch port number
+
+          phys:
+            maxItems: 1
+            description:
+              phandle of an Ethernet SerDes PHY.  This defines which SerDes
+              instance will handle the Ethernet traffic.
+
+          phy-mode:
+            description:
+              This specifies the interface used by the Ethernet SerDes towards
+              the PHY or SFP.
+
+          microchip,bandwidth:
+            description: Specifies bandwidth in Mbit/s allocated to the port.
+            $ref: "/schemas/types.yaml#/definitions/uint32"
+            maximum: 25000
+
+          phy-handle:
+            description:
+              phandle of an Ethernet PHY.  This is optional; if provided, it
+              points to the cuPHY used by the Ethernet SerDes.
+
+          sfp:
+            description:
+              phandle of an SFP.  This is optional and used when not specifying
+              a cuPHY.  It points to the SFP node that describes the SFP used by
+              the Ethernet SerDes.
+
+          managed: true
+
+          microchip,sd-sgpio:
+            description:
+              Index of the port's Signal Detect SGPIO in the set of 384 SGPIOs.
+              This is optional, and only needed if the default index is not
+              correct.
+            $ref: "/schemas/types.yaml#/definitions/uint32"
+            minimum: 0
+            maximum: 383
+
+        required:
+          - reg
+          - phys
+          - phy-mode
+          - microchip,bandwidth
+
+        oneOf:
+          - required:
+              - phy-handle
+          - required:
+              - sfp
+              - managed
+
+required:
+  - compatible
+  - reg
+  - reg-names
+  - interrupts
+  - interrupt-names
+  - resets
+  - reset-names
+  - ethernet-ports
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/interrupt-controller/arm-gic.h>
+    switch: switch@600000000 {
+      compatible = "microchip,sparx5-switch";
+      reg =  <0 0x401000>,
+             <0x10004000 0x7fc000>,
+             <0x11010000 0xaf0000>;
+      reg-names = "cpu", "devices", "gcb";
+      interrupts = <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>;
+      interrupt-names = "xtr";
+      resets = <&reset 0>;
+      reset-names = "switch";
+      ethernet-ports {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        port0: port@0 {
+          reg = <0>;
+          microchip,bandwidth = <1000>;
+          phys = <&serdes 13>;
+          phy-handle = <&phy0>;
+          phy-mode = "qsgmii";
+        };
+        /* ... */
+        /* Then the 25G interfaces */
+        port60: port@60 {
+          reg = <60>;
+          microchip,bandwidth = <25000>;
+          phys = <&serdes 29>;
+          phy-mode = "10gbase-r";
+          sfp = <&sfp_eth60>;
+          managed = "in-band-status";
+          microchip,sd-sgpio = <365>;
+        };
+        port61: port@61 {
+          reg = <61>;
+          microchip,bandwidth = <25000>;
+          phys = <&serdes 30>;
+          phy-mode = "10gbase-r";
+          sfp = <&sfp_eth61>;
+          managed = "in-band-status";
+          microchip,sd-sgpio = <369>;
+        };
+        port62: port@62 {
+          reg = <62>;
+          microchip,bandwidth = <25000>;
+          phys = <&serdes 31>;
+          phy-mode = "10gbase-r";
+          sfp = <&sfp_eth62>;
+          managed = "in-band-status";
+          microchip,sd-sgpio = <373>;
+        };
+        port63: port@63 {
+          reg = <63>;
+          microchip,bandwidth = <25000>;
+          phys = <&serdes 32>;
+          phy-mode = "10gbase-r";
+          sfp = <&sfp_eth63>;
+          managed = "in-band-status";
+          microchip,sd-sgpio = <377>;
+        };
+        /* Finally the Management interface */
+        port64: port@64 {
+          reg = <64>;
+          microchip,bandwidth = <1000>;
+          phys = <&serdes 0>;
+          phy-handle = <&phy64>;
+          phy-mode = "sgmii";
+          mac-address = [ 00 00 00 01 02 03 ];
+        };
+      };
+    };
+
+...
+#  vim: set ts=2 sw=2 sts=2 tw=80 et cc=80 ft=yaml :
index 793693c..6d73ee7 100644 (file)
@@ -47,13 +47,24 @@ The driver interacts with the device in the following ways:
  - Transmit and Receive Queues
     - See description below
 
+Descriptor Formats
+------------------
+GVE supports two descriptor formats: GQI and DQO. These two formats have
+entirely different descriptors, which will be described below.
+
 Registers
 ---------
-All registers are MMIO and big endian.
+All registers are MMIO.
 
 The registers are used for initializing and configuring the device as well as
 querying device status in response to management interrupts.
 
+Endianness
+----------
+- Admin Queue messages and registers are all Big Endian.
+- GQI descriptors and datapath registers are Big Endian.
+- DQO descriptors and datapath registers are Little Endian.
+
 Admin Queue (AQ)
 ----------------
 The Admin Queue is a PAGE_SIZE memory block, treated as an array of AQ
@@ -97,10 +108,10 @@ the queues associated with that interrupt.
 The handler for these irqs schedules the napi for that block to run
 and poll the queues.
 
-Traffic Queues
---------------
-gVNIC's queues are composed of a descriptor ring and a buffer and are
-assigned to a notification block.
+GQI Traffic Queues
+------------------
+GQI queues are composed of a descriptor ring and a buffer and are assigned to a
+notification block.
 
 The descriptor rings are power-of-two-sized ring buffers consisting of
 fixed-size descriptors. They advance their head pointer using a __be32
@@ -121,3 +132,35 @@ Receive
 The buffers for receive rings are put into a data ring that is the same
 length as the descriptor ring and the head and tail pointers advance over
 the rings together.
+
+DQO Traffic Queues
+------------------
+- Every TX and RX queue is assigned a notification block.
+
+- TX and RX buffer queues, which send descriptors to the device, use MMIO
+  doorbells to notify the device of new descriptors.
+
+- RX and TX completion queues, which receive descriptors from the device, use a
+  "generation bit" to know when a descriptor has been populated by the device
+  (see the sketch after this list). The driver initializes all bits with the
+  "current generation". The device populates received descriptors with the
+  "next generation", which is inverted from the current generation. When the
+  ring wraps, the current/next generations are swapped.
+
+- It's the driver's responsibility to ensure that the RX and TX completion
+  queues are not overrun. This can be accomplished by limiting the number of
+  descriptors posted to HW.
+
+- TX packets have a 16-bit completion_tag and RX buffers have a 16-bit
+  buffer_id. These are returned on the TX completion and RX queues
+  respectively to let the driver know which packet/buffer was completed.
+
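As a rough illustration of the generation-bit scheme described above, here is
a minimal C sketch of polling a DQO-style completion ring. All names here
(struct cq_desc, GEN_BIT, the field layout) are hypothetical, not the actual
gve driver code; DQO descriptors are little endian per the Endianness section.

#include <stdbool.h>
#include <stdint.h>

#define GEN_BIT 0x80			/* hypothetical generation-bit position */

struct cq_desc {
	uint8_t flags;			/* carries the generation bit */
	uint8_t rsvd;
	uint16_t tag;			/* 16-bit completion_tag / buffer_id (LE) */
};

struct cq {
	struct cq_desc *ring;
	uint32_t size;			/* number of descriptors in the ring */
	uint32_t head;
	bool cur_gen;			/* generation the driver wrote at init */
};

/* Returns true and fills *tag if the device has populated the next slot. */
static bool cq_poll(struct cq *cq, uint16_t *tag)
{
	struct cq_desc *d = &cq->ring[cq->head];

	/* The device writes the inverted ("next") generation into valid slots. */
	if (!!(d->flags & GEN_BIT) == cq->cur_gen)
		return false;		/* not yet populated by the device */
	/* A real driver would need a read barrier (dma_rmb()) here. */

	*tag = d->tag;			/* would be le16_to_cpu() in the kernel */
	if (++cq->head == cq->size) {	/* wrap: current/next generations swap */
		cq->head = 0;
		cq->cur_gen = !cq->cur_gen;
	}
	return true;
}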
+Transmit
+~~~~~~~~
+A packet's buffers are DMA-mapped for the device to access before transmission.
+Once the packet has been successfully transmitted, the buffers are unmapped.
+
+Receive
+~~~~~~~
+The driver posts fixed-size buffers to HW on the RX buffer queue. The packet
+received on the associated RX queue may span multiple descriptors.
index 7395a33..da4057b 100644 (file)
@@ -5,7 +5,7 @@ NXP SJA1105 switch driver
 Overview
 ========
 
-The NXP SJA1105 is a family of 6 devices:
+The NXP SJA1105 is a family of 10 SPI-managed automotive switches:
 
 - SJA1105E: First generation, no TTEthernet
 - SJA1105T: First generation, TTEthernet
@@ -13,9 +13,11 @@ The NXP SJA1105 is a family of 6 devices:
 - SJA1105Q: Second generation, TTEthernet, no SGMII
 - SJA1105R: Second generation, no TTEthernet, SGMII
 - SJA1105S: Second generation, TTEthernet, SGMII
-
-These are SPI-managed automotive switches, with all ports being gigabit
-capable, and supporting MII/RMII/RGMII and optionally SGMII on one port.
+- SJA1110A: Third generation, TTEthernet, SGMII, integrated 100base-T1 and
+  100base-TX PHYs
+- SJA1110B: Third generation, TTEthernet, SGMII, 100base-T1, 100base-TX
+- SJA1110C: Third generation, TTEthernet, SGMII, 100base-T1, 100base-TX
+- SJA1110D: Third generation, TTEthernet, SGMII, 100base-T1
 
 Being automotive parts, their configuration interface is geared towards
 set-and-forget use, with minimal dynamic interaction at runtime. They
@@ -579,3 +581,54 @@ A board would need to hook up the PHYs connected to the switch to any other
 MDIO bus available to Linux within the system (e.g. to the DSA master's MDIO
 bus). Link state management then works by the driver manually keeping in sync
 (over SPI commands) the MAC link speed with the settings negotiated by the PHY.
+
+By comparison, the SJA1110 supports an MDIO slave access point over which its
+internal 100base-T1 PHYs can be accessed from the host. This is, however, not
+used by the driver; instead, the internal 100base-T1 and 100base-TX PHYs are
+accessed through SPI commands, modeled in Linux as virtual MDIO buses (see
+the sketch below).
+
+The microcontroller attached to the SJA1110 port 0 also has an MDIO controller
+operating in master mode; however, the driver does not support this either,
+since the microcontroller is disabled when the Linux driver operates.
+Discrete PHYs connected to the switch ports should have their MDIO interface
+attached to an MDIO controller from the host system and not to the switch,
+similar to the SJA1105.
+
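As a hedged sketch of the "virtual MDIO bus" idea above: the helpers
sja1110_spi_rd()/sja1110_spi_wr() and the priv layout are hypothetical; only
mdiobus_alloc(), mdiobus_register() and the mii_bus callbacks are real kernel
API. PHYs on such a bus then probe through the normal phylib path.

#include <linux/phy.h>

struct sja1110_priv;	/* opaque here; hypothetical driver state */
int sja1110_spi_rd(struct sja1110_priv *p, int phy, int reg);		/* hypothetical */
int sja1110_spi_wr(struct sja1110_priv *p, int phy, int reg, u16 val);	/* hypothetical */

static int vmdio_read(struct mii_bus *bus, int addr, int regnum)
{
	return sja1110_spi_rd(bus->priv, addr, regnum);	/* PHY reg read over SPI */
}

static int vmdio_write(struct mii_bus *bus, int addr, int regnum, u16 val)
{
	return sja1110_spi_wr(bus->priv, addr, regnum, val);
}

static int vmdio_register(struct device *dev, struct sja1110_priv *priv)
{
	struct mii_bus *bus = mdiobus_alloc();

	if (!bus)
		return -ENOMEM;
	bus->name = "SJA1110 internal 100base-T1 MDIO bus";
	snprintf(bus->id, MII_BUS_ID_SIZE, "%s-base-t1", dev_name(dev));
	bus->read = vmdio_read;
	bus->write = vmdio_write;
	bus->parent = dev;
	bus->priv = priv;
	return mdiobus_register(bus);	/* attached PHYs probe as usual */
}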
+Port compatibility matrix
+-------------------------
+
+The SJA1105 port compatibility matrix is:
+
+===== ============== ============== ==============
+Port   SJA1105E/T     SJA1105P/Q     SJA1105R/S
+===== ============== ============== ==============
+0      xMII           xMII           xMII
+1      xMII           xMII           xMII
+2      xMII           xMII           xMII
+3      xMII           xMII           xMII
+4      xMII           xMII           SGMII
+===== ============== ============== ==============
+
+
+The SJA1110 port compatibility matrix is:
+
+===== ============== ============== ============== ==============
+Port   SJA1110A       SJA1110B       SJA1110C       SJA1110D
+===== ============== ============== ============== ==============
+0      RevMII (uC)    RevMII (uC)    RevMII (uC)    RevMII (uC)
+1      100base-TX     100base-TX     100base-TX
+       or SGMII                                     SGMII
+2      xMII           xMII           xMII           xMII
+       or SGMII                                     or SGMII
+3      xMII           xMII           xMII
+       or SGMII       or SGMII                      SGMII
+       or 2500base-X  or 2500base-X                 or 2500base-X
+4      SGMII          SGMII          SGMII          SGMII
+       or 2500base-X  or 2500base-X  or 2500base-X  or 2500base-X
+5      100base-T1     100base-T1     100base-T1     100base-T1
+6      100base-T1     100base-T1     100base-T1     100base-T1
+7      100base-T1     100base-T1     100base-T1     100base-T1
+8      100base-T1     100base-T1     n/a            n/a
+9      100base-T1     100base-T1     n/a            n/a
+10     100base-T1     n/a            n/a            n/a
+===== ============== ============== ============== ==============
index 8bff728..b3fa522 100644 (file)
@@ -2835,10 +2835,14 @@ encap_port - INTEGER
        Default: 0
 
 plpmtud_probe_interval - INTEGER
-        The time interval (in milliseconds) for sending PLPMTUD probe chunks.
-        These chunks are sent at the specified interval with a variable size
-        to probe the mtu of a given path between 2 endpoints. PLPMTUD will
-        be disabled when 0 is set, and other values for it must be >= 5000.
+        The time interval (in milliseconds) for the PLPMTUD probe timer,
+        which is configured to expire after this period if no
+        acknowledgment to a probe packet has been received. This is also
+        the time interval between probes of the current PMTU once the
+        probe search is done.
+
+        PLPMTUD will be disabled when 0 is set, and other values for it
+        must be >= 5000.
 
        Default: 0
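
A minimal userspace sketch of enabling PLPMTUD via this knob, assuming the
usual /proc/sys path for SCTP sysctls (net.sctp.plpmtud_probe_interval):

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/net/sctp/plpmtud_probe_interval", "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	fprintf(f, "5000\n");	/* smallest allowed non-zero interval, in ms */
	return fclose(f) ? 1 : 0;
}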
 
index 11a9b76..0467b30 100644 (file)
@@ -177,3 +177,27 @@ nf_conntrack_gre_timeout_stream - INTEGER (seconds)
 
 This extended timeout will be used in case there is a GRE stream
        detected.
+
+nf_flowtable_tcp_timeout - INTEGER (seconds)
+        default 30
+
+        Controls the offload timeout for TCP connections.
+        TCP connections may be offloaded from nf conntrack to the nf flow
+        table. Once aged out, the connection is returned to nf conntrack
+        with the TCP pickup timeout.
+
+nf_flowtable_tcp_pickup - INTEGER (seconds)
+        default 120
+
+        TCP connection timeout after being aged out of nf flow table offload.
+
+nf_flowtable_udp_timeout - INTEGER (seconds)
+        default 30
+
+        Controls the offload timeout for UDP connections.
+        UDP connections may be offloaded from nf conntrack to the nf flow
+        table. Once aged out, the connection is returned to nf conntrack
+        with the UDP pickup timeout.
+
+nf_flowtable_udp_pickup - INTEGER (seconds)
+        default 30
+
+        UDP connection timeout after being aged out of nf flow table offload.
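
A short sketch for reading these four knobs, assuming they live under the
usual /proc/sys/net/netfilter/ path and are evaluated per network namespace:

#include <stdio.h>

int main(void)
{
	static const char *knobs[] = {
		"nf_flowtable_tcp_timeout", "nf_flowtable_tcp_pickup",
		"nf_flowtable_udp_timeout", "nf_flowtable_udp_pickup",
	};
	char path[128], buf[32];

	for (int i = 0; i < 4; i++) {
		snprintf(path, sizeof(path), "/proc/sys/net/netfilter/%s",
			 knobs[i]);
		FILE *f = fopen(path, "r");

		if (f && fgets(buf, sizeof(buf), f))
			printf("%s = %s", knobs[i], buf);	/* seconds */
		if (f)
			fclose(f);
	}
	return 0;
}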
index 5742035..6b3daba 100644 (file)
 #define SO_PREFER_BUSY_POLL    69
 #define SO_BUSY_POLL_BUDGET    70
 
+#define SO_NETNS_COOKIE                71
+
 #if !defined(__KERNEL__)
 
 #if __BITS_PER_LONG == 64
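
A minimal sketch of querying the new option; SO_NETNS_COOKIE is read-only and
returns the cookie of the network namespace the socket belongs to (the
selftest added in this merge, tools/testing/selftests/net/so_netns_cookie.c,
exercises it):

#include <stdint.h>
#include <stdio.h>
#include <sys/socket.h>

#ifndef SO_NETNS_COOKIE
#define SO_NETNS_COOKIE 71		/* matches the uapi value above */
#endif

int main(void)
{
	uint64_t cookie;
	socklen_t len = sizeof(cookie);
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd < 0 || getsockopt(fd, SOL_SOCKET, SO_NETNS_COOKIE, &cookie, &len))
		return 1;
	printf("netns cookie: %llu\n", (unsigned long long)cookie);
	return 0;
}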
index d64621d..ad07fff 100644 (file)
                        };
                };
 
-               reset@611010008 {
-                       compatible = "microchip,sparx5-chip-reset";
+               reset: reset-controller@611010008 {
+                       compatible = "microchip,sparx5-switch-reset";
                        reg = <0x6 0x11010008 0x4>;
+                       reg-names = "gcb";
+                       #reset-cells = <1>;
+                       cpu-syscon = <&cpu_ctrl>;
                };
 
                uart0: serial@600100000 {
                                        "GPIO_46", "GPIO_47";
                                function = "emmc";
                        };
+
+                       miim1_pins: miim1-pins {
+                               pins = "GPIO_56", "GPIO_57";
+                               function = "miim";
+                       };
+
+                       miim2_pins: miim2-pins {
+                               pins = "GPIO_58", "GPIO_59";
+                               function = "miim";
+                       };
+
+                       miim3_pins: miim3-pins {
+                               pins = "GPIO_52", "GPIO_53";
+                               function = "miim";
+                       };
                };
 
                sgpio0: gpio@61101036c {
                        clocks = <&sys_clk>;
                        pinctrl-0 = <&sgpio0_pins>;
                        pinctrl-names = "default";
+                       resets = <&reset 0>;
+                       reset-names = "switch";
                        reg = <0x6 0x1101036c 0x100>;
                        sgpio_in0: gpio@0 {
                                compatible = "microchip,sparx5-sgpio-bank";
                                gpio-controller;
                                #gpio-cells = <3>;
                                ngpios = <96>;
+                               interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
+                               interrupt-controller;
+                               #interrupt-cells = <3>;
                        };
                        sgpio_out0: gpio@1 {
                                compatible = "microchip,sparx5-sgpio-bank";
                        clocks = <&sys_clk>;
                        pinctrl-0 = <&sgpio1_pins>;
                        pinctrl-names = "default";
+                       resets = <&reset 0>;
+                       reset-names = "switch";
                        reg = <0x6 0x11010484 0x100>;
                        sgpio_in1: gpio@0 {
                                compatible = "microchip,sparx5-sgpio-bank";
                                gpio-controller;
                                #gpio-cells = <3>;
                                ngpios = <96>;
+                               interrupts = <GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>;
+                               interrupt-controller;
+                               #interrupt-cells = <3>;
                        };
                        sgpio_out1: gpio@1 {
                                compatible = "microchip,sparx5-sgpio-bank";
                        clocks = <&sys_clk>;
                        pinctrl-0 = <&sgpio2_pins>;
                        pinctrl-names = "default";
+                       resets = <&reset 0>;
+                       reset-names = "switch";
                        reg = <0x6 0x1101059c 0x100>;
                        sgpio_in2: gpio@0 {
                                reg = <0>;
                                gpio-controller;
                                #gpio-cells = <3>;
                                ngpios = <96>;
+                               interrupts = <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>;
+                               interrupt-controller;
+                               #interrupt-cells = <3>;
                        };
                        sgpio_out2: gpio@1 {
                                compatible = "microchip,sparx5-sgpio-bank";
                        #thermal-sensor-cells = <0>;
                        clocks = <&ahb_clk>;
                };
+
+               mdio0: mdio@6110102b0 {
+                       compatible = "mscc,ocelot-miim";
+                       status = "disabled";
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+                       reg = <0x6 0x110102b0 0x24>;
+               };
+
+               mdio1: mdio@6110102d4 {
+                       compatible = "mscc,ocelot-miim";
+                       status = "disabled";
+                       pinctrl-0 = <&miim1_pins>;
+                       pinctrl-names = "default";
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+                       reg = <0x6 0x110102d4 0x24>;
+               };
+
+               mdio2: mdio@6110102f8 {
+                       compatible = "mscc,ocelot-miim";
+                       status = "disabled";
+                       pinctrl-0 = <&miim2_pins>;
+                       pinctrl-names = "default";
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+                       reg = <0x6 0x110102f8 0x24>;
+               };
+
+               mdio3: mdio@61101031c {
+                       compatible = "mscc,ocelot-miim";
+                       status = "disabled";
+                       pinctrl-0 = <&miim3_pins>;
+                       pinctrl-names = "default";
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+                       reg = <0x6 0x1101031c 0x24>;
+               };
+
+               serdes: serdes@10808000 {
+                       compatible = "microchip,sparx5-serdes";
+                       #phy-cells = <1>;
+                       clocks = <&sys_clk>;
+                       reg = <0x6 0x10808000 0x5d0000>;
+               };
+
+               switch: switch@600000000 {
+                       compatible = "microchip,sparx5-switch";
+                       reg =   <0x6 0 0x401000>,
+                               <0x6 0x10004000 0x7fc000>,
+                               <0x6 0x11010000 0xaf0000>;
+                       reg-names = "cpu", "dev", "gcb";
+                       interrupt-names = "xtr";
+                       interrupts = <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>;
+                       resets = <&reset 0>;
+                       reset-names = "switch";
+               };
        };
 };
index f0c9151..33faf1f 100644 (file)
@@ -7,30 +7,6 @@
 #include "sparx5_pcb_common.dtsi"
 
 /{
-       aliases {
-           i2c0   = &i2c0;
-           i2c100 = &i2c100;
-           i2c101 = &i2c101;
-           i2c102 = &i2c102;
-           i2c103 = &i2c103;
-           i2c104 = &i2c104;
-           i2c105 = &i2c105;
-           i2c106 = &i2c106;
-           i2c107 = &i2c107;
-           i2c108 = &i2c108;
-           i2c109 = &i2c109;
-           i2c110 = &i2c110;
-           i2c111 = &i2c111;
-           i2c112 = &i2c112;
-           i2c113 = &i2c113;
-           i2c114 = &i2c114;
-           i2c115 = &i2c115;
-           i2c116 = &i2c116;
-           i2c117 = &i2c117;
-           i2c118 = &i2c118;
-           i2c119 = &i2c119;
-       };
-
        gpio-restart {
                compatible = "gpio-restart";
                gpios = <&gpio 37 GPIO_ACTIVE_LOW>;
 
 &spi0 {
        status = "okay";
-       spi@0 {
-               compatible = "spi-mux";
-               mux-controls = <&mux>;
-               #address-cells = <1>;
-               #size-cells = <0>;
-               reg = <0>;      /* CS0 */
-               spi-flash@9 {
-                       compatible = "jedec,spi-nor";
-                       spi-max-frequency = <8000000>;
-                       reg = <0x9>;    /* SPI */
-               };
+       spi-flash@0 {
+               compatible = "jedec,spi-nor";
+               spi-max-frequency = <8000000>;
+               reg = <0>;
        };
 };
 
        };
 };
 
+&sgpio0 {
+       status = "okay";
+       microchip,sgpio-port-ranges = <8 15>;
+       gpio@0 {
+               ngpios = <64>;
+       };
+       gpio@1 {
+               ngpios = <64>;
+       };
+};
+
+&sgpio1 {
+       status = "okay";
+       microchip,sgpio-port-ranges = <24 31>;
+       gpio@0 {
+               ngpios = <64>;
+       };
+       gpio@1 {
+               ngpios = <64>;
+       };
+};
+
+&sgpio2 {
+       status = "okay";
+       microchip,sgpio-port-ranges = <0 0>, <11 31>;
+};
+
 &gpio {
        i2cmux_pins_i: i2cmux-pins-i {
               pins = "GPIO_16", "GPIO_17", "GPIO_18", "GPIO_19",
 
 &i2c0_imux {
        pinctrl-names =
-               "i2c100", "i2c101", "i2c102", "i2c103",
-               "i2c104", "i2c105", "i2c106", "i2c107",
-               "i2c108", "i2c109", "i2c110", "i2c111", "idle";
+               "i2c_sfp1", "i2c_sfp2", "i2c_sfp3", "i2c_sfp4",
+               "i2c_sfp5", "i2c_sfp6", "i2c_sfp7", "i2c_sfp8",
+               "i2c_sfp9", "i2c_sfp10", "i2c_sfp11", "i2c_sfp12", "idle";
        pinctrl-0 = <&i2cmux_0>;
        pinctrl-1 = <&i2cmux_1>;
        pinctrl-2 = <&i2cmux_2>;
        pinctrl-10 = <&i2cmux_10>;
        pinctrl-11 = <&i2cmux_11>;
        pinctrl-12 = <&i2cmux_pins_i>;
-       i2c100: i2c_sfp1 {
+       i2c_sfp1: i2c_sfp1 {
                reg = <0x0>;
                #address-cells = <1>;
                #size-cells = <0>;
        };
-       i2c101: i2c_sfp2 {
+       i2c_sfp2: i2c_sfp2 {
                reg = <0x1>;
                #address-cells = <1>;
                #size-cells = <0>;
        };
-       i2c102: i2c_sfp3 {
+       i2c_sfp3: i2c_sfp3 {
                reg = <0x2>;
                #address-cells = <1>;
                #size-cells = <0>;
        };
-       i2c103: i2c_sfp4 {
+       i2c_sfp4: i2c_sfp4 {
                reg = <0x3>;
                #address-cells = <1>;
                #size-cells = <0>;
        };
-       i2c104: i2c_sfp5 {
+       i2c_sfp5: i2c_sfp5 {
                reg = <0x4>;
                #address-cells = <1>;
                #size-cells = <0>;
        };
-       i2c105: i2c_sfp6 {
+       i2c_sfp6: i2c_sfp6 {
                reg = <0x5>;
                #address-cells = <1>;
                #size-cells = <0>;
        };
-       i2c106: i2c_sfp7 {
+       i2c_sfp7: i2c_sfp7 {
                reg = <0x6>;
                #address-cells = <1>;
                #size-cells = <0>;
        };
-       i2c107: i2c_sfp8 {
+       i2c_sfp8: i2c_sfp8 {
                reg = <0x7>;
                #address-cells = <1>;
                #size-cells = <0>;
        };
-       i2c108: i2c_sfp9 {
+       i2c_sfp9: i2c_sfp9 {
                reg = <0x8>;
                #address-cells = <1>;
                #size-cells = <0>;
        };
-       i2c109: i2c_sfp10 {
+       i2c_sfp10: i2c_sfp10 {
                reg = <0x9>;
                #address-cells = <1>;
                #size-cells = <0>;
        };
-       i2c110: i2c_sfp11 {
+       i2c_sfp11: i2c_sfp11 {
                reg = <0xa>;
                #address-cells = <1>;
                #size-cells = <0>;
        };
-       i2c111: i2c_sfp12 {
+       i2c_sfp12: i2c_sfp12 {
                reg = <0xb>;
                #address-cells = <1>;
                #size-cells = <0>;
                     &gpio 61 GPIO_ACTIVE_HIGH
                     &gpio 54 GPIO_ACTIVE_HIGH>;
        idle-state = <0x8>;
-       i2c112: i2c_sfp13 {
+       i2c_sfp13: i2c_sfp13 {
                reg = <0x0>;
                #address-cells = <1>;
                #size-cells = <0>;
        };
-       i2c113: i2c_sfp14 {
+       i2c_sfp14: i2c_sfp14 {
                reg = <0x1>;
                #address-cells = <1>;
                #size-cells = <0>;
        };
-       i2c114: i2c_sfp15 {
+       i2c_sfp15: i2c_sfp15 {
                reg = <0x2>;
                #address-cells = <1>;
                #size-cells = <0>;
        };
-       i2c115: i2c_sfp16 {
+       i2c_sfp16: i2c_sfp16 {
                reg = <0x3>;
                #address-cells = <1>;
                #size-cells = <0>;
        };
-       i2c116: i2c_sfp17 {
+       i2c_sfp17: i2c_sfp17 {
                reg = <0x4>;
                #address-cells = <1>;
                #size-cells = <0>;
        };
-       i2c117: i2c_sfp18 {
+       i2c_sfp18: i2c_sfp18 {
                reg = <0x5>;
                #address-cells = <1>;
                #size-cells = <0>;
        };
-       i2c118: i2c_sfp19 {
+       i2c_sfp19: i2c_sfp19 {
                reg = <0x6>;
                #address-cells = <1>;
                #size-cells = <0>;
        };
-       i2c119: i2c_sfp20 {
+       i2c_sfp20: i2c_sfp20 {
                reg = <0x7>;
                #address-cells = <1>;
                #size-cells = <0>;
        };
 };
+
+&mdio3 {
+       status = "okay";
+       phy64: ethernet-phy@64 {
+               reg = <28>;
+       };
+};
+
+&axi {
+       sfp_eth12: sfp-eth12 {
+               compatible       = "sff,sfp";
+               i2c-bus          = <&i2c_sfp1>;
+               tx-disable-gpios = <&sgpio_out2 11 1 GPIO_ACTIVE_LOW>;
+               los-gpios        = <&sgpio_in2 11 1 GPIO_ACTIVE_HIGH>;
+               mod-def0-gpios   = <&sgpio_in2 11 2 GPIO_ACTIVE_LOW>;
+               tx-fault-gpios   = <&sgpio_in2 12 0 GPIO_ACTIVE_HIGH>;
+       };
+       sfp_eth13: sfp-eth13 {
+               compatible       = "sff,sfp";
+               i2c-bus          = <&i2c_sfp2>;
+               tx-disable-gpios = <&sgpio_out2 12 1 GPIO_ACTIVE_LOW>;
+               los-gpios        = <&sgpio_in2 12 1 GPIO_ACTIVE_HIGH>;
+               mod-def0-gpios   = <&sgpio_in2 12 2 GPIO_ACTIVE_LOW>;
+               tx-fault-gpios   = <&sgpio_in2 13 0 GPIO_ACTIVE_HIGH>;
+       };
+       sfp_eth14: sfp-eth14 {
+               compatible       = "sff,sfp";
+               i2c-bus          = <&i2c_sfp3>;
+               tx-disable-gpios = <&sgpio_out2 13 1 GPIO_ACTIVE_LOW>;
+               los-gpios        = <&sgpio_in2 13 1 GPIO_ACTIVE_HIGH>;
+               mod-def0-gpios   = <&sgpio_in2 13 2 GPIO_ACTIVE_LOW>;
+               tx-fault-gpios   = <&sgpio_in2 14 0 GPIO_ACTIVE_HIGH>;
+       };
+       sfp_eth15: sfp-eth15 {
+               compatible       = "sff,sfp";
+               i2c-bus          = <&i2c_sfp4>;
+               tx-disable-gpios = <&sgpio_out2 14 1 GPIO_ACTIVE_LOW>;
+               los-gpios        = <&sgpio_in2 14 1 GPIO_ACTIVE_HIGH>;
+               mod-def0-gpios   = <&sgpio_in2 14 2 GPIO_ACTIVE_LOW>;
+               tx-fault-gpios   = <&sgpio_in2 15 0 GPIO_ACTIVE_HIGH>;
+       };
+       sfp_eth48: sfp-eth48 {
+               compatible       = "sff,sfp";
+               i2c-bus          = <&i2c_sfp5>;
+               tx-disable-gpios = <&sgpio_out2 15 1 GPIO_ACTIVE_LOW>;
+               los-gpios        = <&sgpio_in2 15 1 GPIO_ACTIVE_HIGH>;
+               mod-def0-gpios   = <&sgpio_in2 15 2 GPIO_ACTIVE_LOW>;
+               tx-fault-gpios   = <&sgpio_in2 16 0 GPIO_ACTIVE_HIGH>;
+       };
+       sfp_eth49: sfp-eth49 {
+               compatible       = "sff,sfp";
+               i2c-bus          = <&i2c_sfp6>;
+               tx-disable-gpios = <&sgpio_out2 16 1 GPIO_ACTIVE_LOW>;
+               los-gpios        = <&sgpio_in2 16 1 GPIO_ACTIVE_HIGH>;
+               mod-def0-gpios   = <&sgpio_in2 16 2 GPIO_ACTIVE_LOW>;
+               tx-fault-gpios   = <&sgpio_in2 17 0 GPIO_ACTIVE_HIGH>;
+       };
+       sfp_eth50: sfp-eth50 {
+               compatible       = "sff,sfp";
+               i2c-bus          = <&i2c_sfp7>;
+               tx-disable-gpios = <&sgpio_out2 17 1 GPIO_ACTIVE_LOW>;
+               los-gpios        = <&sgpio_in2 17 1 GPIO_ACTIVE_HIGH>;
+               mod-def0-gpios   = <&sgpio_in2 17 2 GPIO_ACTIVE_LOW>;
+               tx-fault-gpios   = <&sgpio_in2 18 0 GPIO_ACTIVE_HIGH>;
+       };
+       sfp_eth51: sfp-eth51 {
+               compatible       = "sff,sfp";
+               i2c-bus          = <&i2c_sfp8>;
+               tx-disable-gpios = <&sgpio_out2 18 1 GPIO_ACTIVE_LOW>;
+               los-gpios        = <&sgpio_in2 18 1 GPIO_ACTIVE_HIGH>;
+               mod-def0-gpios   = <&sgpio_in2 18 2 GPIO_ACTIVE_LOW>;
+               tx-fault-gpios   = <&sgpio_in2 19 0 GPIO_ACTIVE_HIGH>;
+       };
+       sfp_eth52: sfp-eth52 {
+               compatible       = "sff,sfp";
+               i2c-bus          = <&i2c_sfp9>;
+               tx-disable-gpios = <&sgpio_out2 19 1 GPIO_ACTIVE_LOW>;
+               los-gpios        = <&sgpio_in2 19 1 GPIO_ACTIVE_HIGH>;
+               mod-def0-gpios   = <&sgpio_in2 19 2 GPIO_ACTIVE_LOW>;
+               tx-fault-gpios   = <&sgpio_in2 20 0 GPIO_ACTIVE_HIGH>;
+       };
+       sfp_eth53: sfp-eth53 {
+               compatible       = "sff,sfp";
+               i2c-bus          = <&i2c_sfp10>;
+               tx-disable-gpios = <&sgpio_out2 20 1 GPIO_ACTIVE_LOW>;
+               los-gpios        = <&sgpio_in2 20 1 GPIO_ACTIVE_HIGH>;
+               mod-def0-gpios   = <&sgpio_in2 20 2 GPIO_ACTIVE_LOW>;
+               tx-fault-gpios   = <&sgpio_in2 21 0 GPIO_ACTIVE_HIGH>;
+       };
+       sfp_eth54: sfp-eth54 {
+               compatible       = "sff,sfp";
+               i2c-bus          = <&i2c_sfp11>;
+               tx-disable-gpios = <&sgpio_out2 21 1 GPIO_ACTIVE_LOW>;
+               los-gpios        = <&sgpio_in2 21 1 GPIO_ACTIVE_HIGH>;
+               mod-def0-gpios   = <&sgpio_in2 21 2 GPIO_ACTIVE_LOW>;
+               tx-fault-gpios   = <&sgpio_in2 22 0 GPIO_ACTIVE_HIGH>;
+       };
+       sfp_eth55: sfp-eth55 {
+               compatible       = "sff,sfp";
+               i2c-bus          = <&i2c_sfp12>;
+               tx-disable-gpios = <&sgpio_out2 22 1 GPIO_ACTIVE_LOW>;
+               los-gpios        = <&sgpio_in2 22 1 GPIO_ACTIVE_HIGH>;
+               mod-def0-gpios   = <&sgpio_in2 22 2 GPIO_ACTIVE_LOW>;
+               tx-fault-gpios   = <&sgpio_in2 23 0 GPIO_ACTIVE_HIGH>;
+       };
+       sfp_eth56: sfp-eth56 {
+               compatible       = "sff,sfp";
+               i2c-bus          = <&i2c_sfp13>;
+               tx-disable-gpios = <&sgpio_out2 23 1 GPIO_ACTIVE_LOW>;
+               los-gpios        = <&sgpio_in2 23 1 GPIO_ACTIVE_HIGH>;
+               mod-def0-gpios   = <&sgpio_in2 23 2 GPIO_ACTIVE_LOW>;
+               tx-fault-gpios   = <&sgpio_in2 24 0 GPIO_ACTIVE_HIGH>;
+       };
+       sfp_eth57: sfp-eth57 {
+               compatible       = "sff,sfp";
+               i2c-bus          = <&i2c_sfp14>;
+               tx-disable-gpios = <&sgpio_out2 24 1 GPIO_ACTIVE_LOW>;
+               los-gpios        = <&sgpio_in2 24 1 GPIO_ACTIVE_HIGH>;
+               mod-def0-gpios   = <&sgpio_in2 24 2 GPIO_ACTIVE_LOW>;
+               tx-fault-gpios   = <&sgpio_in2 25 0 GPIO_ACTIVE_HIGH>;
+       };
+       sfp_eth58: sfp-eth58 {
+               compatible       = "sff,sfp";
+               i2c-bus          = <&i2c_sfp15>;
+               tx-disable-gpios = <&sgpio_out2 25 1 GPIO_ACTIVE_LOW>;
+               los-gpios        = <&sgpio_in2 25 1 GPIO_ACTIVE_HIGH>;
+               mod-def0-gpios   = <&sgpio_in2 25 2 GPIO_ACTIVE_LOW>;
+               tx-fault-gpios   = <&sgpio_in2 26 0 GPIO_ACTIVE_HIGH>;
+       };
+       sfp_eth59: sfp-eth59 {
+               compatible       = "sff,sfp";
+               i2c-bus          = <&i2c_sfp16>;
+               tx-disable-gpios = <&sgpio_out2 26 1 GPIO_ACTIVE_LOW>;
+               los-gpios        = <&sgpio_in2 26 1 GPIO_ACTIVE_HIGH>;
+               mod-def0-gpios   = <&sgpio_in2 26 2 GPIO_ACTIVE_LOW>;
+               tx-fault-gpios   = <&sgpio_in2 27 0 GPIO_ACTIVE_HIGH>;
+       };
+       sfp_eth60: sfp-eth60 {
+               compatible       = "sff,sfp";
+               i2c-bus          = <&i2c_sfp17>;
+               tx-disable-gpios = <&sgpio_out2 27 1 GPIO_ACTIVE_LOW>;
+               los-gpios        = <&sgpio_in2 27 1 GPIO_ACTIVE_HIGH>;
+               mod-def0-gpios   = <&sgpio_in2 27 2 GPIO_ACTIVE_LOW>;
+               tx-fault-gpios   = <&sgpio_in2 28 0 GPIO_ACTIVE_HIGH>;
+       };
+       sfp_eth61: sfp-eth61 {
+               compatible       = "sff,sfp";
+               i2c-bus          = <&i2c_sfp18>;
+               tx-disable-gpios = <&sgpio_out2 28 1 GPIO_ACTIVE_LOW>;
+               los-gpios        = <&sgpio_in2 28 1 GPIO_ACTIVE_HIGH>;
+               mod-def0-gpios   = <&sgpio_in2 28 2 GPIO_ACTIVE_LOW>;
+               tx-fault-gpios   = <&sgpio_in2 29 0 GPIO_ACTIVE_HIGH>;
+       };
+       sfp_eth62: sfp-eth62 {
+               compatible       = "sff,sfp";
+               i2c-bus          = <&i2c_sfp19>;
+               tx-disable-gpios = <&sgpio_out2 29 1 GPIO_ACTIVE_LOW>;
+               los-gpios        = <&sgpio_in2 29 1 GPIO_ACTIVE_HIGH>;
+               mod-def0-gpios   = <&sgpio_in2 29 2 GPIO_ACTIVE_LOW>;
+               tx-fault-gpios   = <&sgpio_in2 30 0 GPIO_ACTIVE_HIGH>;
+       };
+       sfp_eth63: sfp-eth63 {
+               compatible       = "sff,sfp";
+               i2c-bus          = <&i2c_sfp20>;
+               tx-disable-gpios = <&sgpio_out2 30 1 GPIO_ACTIVE_LOW>;
+               los-gpios        = <&sgpio_in2 30 1 GPIO_ACTIVE_HIGH>;
+               mod-def0-gpios   = <&sgpio_in2 30 2 GPIO_ACTIVE_LOW>;
+               tx-fault-gpios   = <&sgpio_in2 31 0 GPIO_ACTIVE_HIGH>;
+       };
+};
+
+&switch {
+       ethernet-ports {
+               #address-cells = <1>;
+               #size-cells = <0>;
+
+               /* 10G SFPs */
+               port12: port@12 {
+                       reg = <12>;
+                       microchip,bandwidth = <10000>;
+                       phys = <&serdes 13>;
+                       phy-mode = "10gbase-r";
+                       sfp = <&sfp_eth12>;
+                       microchip,sd-sgpio = <301>;
+                       managed = "in-band-status";
+               };
+               port13: port@13 {
+                       reg = <13>;
+                       /* Example: CU SFP, 1G speed */
+                       microchip,bandwidth = <10000>;
+                       phys = <&serdes 14>;
+                       phy-mode = "10gbase-r";
+                       sfp = <&sfp_eth13>;
+                       microchip,sd-sgpio = <305>;
+                       managed = "in-band-status";
+               };
+               port14: port@14 {
+                       reg = <14>;
+                       microchip,bandwidth = <10000>;
+                       phys = <&serdes 15>;
+                       phy-mode = "10gbase-r";
+                       sfp = <&sfp_eth14>;
+                       microchip,sd-sgpio = <309>;
+                       managed = "in-band-status";
+               };
+               port15: port@15 {
+                       reg = <15>;
+                       microchip,bandwidth = <10000>;
+                       phys = <&serdes 16>;
+                       phy-mode = "10gbase-r";
+                       sfp = <&sfp_eth15>;
+                       microchip,sd-sgpio = <313>;
+                       managed = "in-band-status";
+               };
+               port48: port@48 {
+                       reg = <48>;
+                       microchip,bandwidth = <10000>;
+                       phys = <&serdes 17>;
+                       phy-mode = "10gbase-r";
+                       sfp = <&sfp_eth48>;
+                       microchip,sd-sgpio = <317>;
+                       managed = "in-band-status";
+               };
+               port49: port@49 {
+                       reg = <49>;
+                       microchip,bandwidth = <10000>;
+                       phys = <&serdes 18>;
+                       phy-mode = "10gbase-r";
+                       sfp = <&sfp_eth49>;
+                       microchip,sd-sgpio = <321>;
+                       managed = "in-band-status";
+               };
+               port50: port@50 {
+                       reg = <50>;
+                       microchip,bandwidth = <10000>;
+                       phys = <&serdes 19>;
+                       phy-mode = "10gbase-r";
+                       sfp = <&sfp_eth50>;
+                       microchip,sd-sgpio = <325>;
+                       managed = "in-band-status";
+               };
+               port51: port@51 {
+                       reg = <51>;
+                       microchip,bandwidth = <10000>;
+                       phys = <&serdes 20>;
+                       phy-mode = "10gbase-r";
+                       sfp = <&sfp_eth51>;
+                       microchip,sd-sgpio = <329>;
+                       managed = "in-band-status";
+               };
+               port52: port@52 {
+                       reg = <52>;
+                       microchip,bandwidth = <10000>;
+                       phys = <&serdes 21>;
+                       phy-mode = "10gbase-r";
+                       sfp = <&sfp_eth52>;
+                       microchip,sd-sgpio = <333>;
+                       managed = "in-band-status";
+               };
+               port53: port@53 {
+                       reg = <53>;
+                       microchip,bandwidth = <10000>;
+                       phys = <&serdes 22>;
+                       phy-mode = "10gbase-r";
+                       sfp = <&sfp_eth53>;
+                       microchip,sd-sgpio = <337>;
+                       managed = "in-band-status";
+               };
+               port54: port@54 {
+                       reg = <54>;
+                       microchip,bandwidth = <10000>;
+                       phys = <&serdes 23>;
+                       phy-mode = "10gbase-r";
+                       sfp = <&sfp_eth54>;
+                       microchip,sd-sgpio = <341>;
+                       managed = "in-band-status";
+               };
+               port55: port@55 {
+                       reg = <55>;
+                       microchip,bandwidth = <10000>;
+                       phys = <&serdes 24>;
+                       phy-mode = "10gbase-r";
+                       sfp = <&sfp_eth55>;
+                       microchip,sd-sgpio = <345>;
+                       managed = "in-band-status";
+               };
+               /* 25G SFPs */
+               port56: port@56 {
+                       reg = <56>;
+                       microchip,bandwidth = <10000>;
+                       phys = <&serdes 25>;
+                       phy-mode = "10gbase-r";
+                       sfp = <&sfp_eth56>;
+                       microchip,sd-sgpio = <349>;
+                       managed = "in-band-status";
+               };
+               port57: port@57 {
+                       reg = <57>;
+                       microchip,bandwidth = <10000>;
+                       phys = <&serdes 26>;
+                       phy-mode = "10gbase-r";
+                       sfp = <&sfp_eth57>;
+                       microchip,sd-sgpio = <353>;
+                       managed = "in-band-status";
+               };
+               port58: port@58 {
+                       reg = <58>;
+                       microchip,bandwidth = <10000>;
+                       phys = <&serdes 27>;
+                       phy-mode = "10gbase-r";
+                       sfp = <&sfp_eth58>;
+                       microchip,sd-sgpio = <357>;
+                       managed = "in-band-status";
+               };
+               port59: port@59 {
+                       reg = <59>;
+                       microchip,bandwidth = <10000>;
+                       phys = <&serdes 28>;
+                       phy-mode = "10gbase-r";
+                       sfp = <&sfp_eth59>;
+                       microchip,sd-sgpio = <361>;
+                       managed = "in-band-status";
+               };
+               port60: port@60 {
+                       reg = <60>;
+                       microchip,bandwidth = <10000>;
+                       phys = <&serdes 29>;
+                       phy-mode = "10gbase-r";
+                       sfp = <&sfp_eth60>;
+                       microchip,sd-sgpio = <365>;
+                       managed = "in-band-status";
+               };
+               port61: port@61 {
+                       reg = <61>;
+                       microchip,bandwidth = <10000>;
+                       phys = <&serdes 30>;
+                       phy-mode = "10gbase-r";
+                       sfp = <&sfp_eth61>;
+                       microchip,sd-sgpio = <369>;
+                       managed = "in-band-status";
+               };
+               port62: port@62 {
+                       reg = <62>;
+                       microchip,bandwidth = <10000>;
+                       phys = <&serdes 31>;
+                       phy-mode = "10gbase-r";
+                       sfp = <&sfp_eth62>;
+                       microchip,sd-sgpio = <373>;
+                       managed = "in-band-status";
+               };
+               port63: port@63 {
+                       reg = <63>;
+                       microchip,bandwidth = <10000>;
+                       phys = <&serdes 32>;
+                       phy-mode = "10gbase-r";
+                       sfp = <&sfp_eth63>;
+                       microchip,sd-sgpio = <377>;
+                       managed = "in-band-status";
+               };
+               /* Finally the Management interface */
+               port64: port@64 {
+                       reg = <64>;
+                       microchip,bandwidth = <1000>;
+                       phys = <&serdes 0>;
+                       phy-handle = <&phy64>;
+                       phy-mode = "sgmii";
+               };
+       };
+};
index e28c6dd..ef96e6d 100644 (file)
@@ -7,14 +7,6 @@
 #include "sparx5_pcb_common.dtsi"
 
 /{
-       aliases {
-           i2c0   = &i2c0;
-           i2c152 = &i2c152;
-           i2c153 = &i2c153;
-           i2c154 = &i2c154;
-           i2c155 = &i2c155;
-       };
-
        gpio-restart {
                compatible = "gpio-restart";
                gpios = <&gpio 37 GPIO_ACTIVE_LOW>;
 
 &spi0 {
        status = "okay";
-       spi@0 {
-               compatible = "spi-mux";
-               mux-controls = <&mux>;
-               #address-cells = <1>;
-               #size-cells = <0>;
-               reg = <0>; /* CS0 */
-               spi-flash@9 {
-                       compatible = "jedec,spi-nor";
-                       spi-max-frequency = <8000000>;
-                       reg = <0x9>; /* SPI */
-               };
+       spi-flash@0 {
+               compatible = "jedec,spi-nor";
+               spi-max-frequency = <8000000>;
+               reg = <0>;
        };
 };
 
        };
 };
 
+&sgpio2 {
+       status = "okay";
+       microchip,sgpio-port-ranges = <0 0>, <16 18>, <28 31>;
+};
+
 &axi {
        i2c0_imux: i2c0-imux@0 {
                compatible = "i2c-mux-pinctrl";
 
 &i2c0_imux {
        pinctrl-names =
-               "i2c152", "i2c153", "i2c154", "i2c155",
+               "i2c_sfp1", "i2c_sfp2", "i2c_sfp3", "i2c_sfp4",
                "idle";
        pinctrl-0 = <&i2cmux_s29>;
        pinctrl-1 = <&i2cmux_s30>;
        pinctrl-2 = <&i2cmux_s31>;
        pinctrl-3 = <&i2cmux_s32>;
        pinctrl-4 = <&i2cmux_pins_i>;
-       i2c152: i2c_sfp1 {
+       i2c_sfp1: i2c_sfp1 {
                reg = <0x0>;
                #address-cells = <1>;
                #size-cells = <0>;
        };
-       i2c153: i2c_sfp2 {
+       i2c_sfp2: i2c_sfp2 {
                reg = <0x1>;
                #address-cells = <1>;
                #size-cells = <0>;
        };
-       i2c154: i2c_sfp3 {
+       i2c_sfp3: i2c_sfp3 {
                reg = <0x2>;
                #address-cells = <1>;
                #size-cells = <0>;
        };
-       i2c155: i2c_sfp4 {
+       i2c_sfp4: i2c_sfp4 {
                reg = <0x3>;
                #address-cells = <1>;
                #size-cells = <0>;
        };
 };
+
+&axi {
+       sfp_eth60: sfp-eth60 {
+               compatible         = "sff,sfp";
+               i2c-bus            = <&i2c_sfp1>;
+               tx-disable-gpios   = <&sgpio_out2 28 0 GPIO_ACTIVE_LOW>;
+               rate-select0-gpios = <&sgpio_out2 28 1 GPIO_ACTIVE_HIGH>;
+               los-gpios          = <&sgpio_in2 28 0 GPIO_ACTIVE_HIGH>;
+               mod-def0-gpios     = <&sgpio_in2 28 1 GPIO_ACTIVE_LOW>;
+               tx-fault-gpios     = <&sgpio_in2 28 2 GPIO_ACTIVE_HIGH>;
+       };
+       sfp_eth61: sfp-eth61 {
+               compatible         = "sff,sfp";
+               i2c-bus            = <&i2c_sfp2>;
+               tx-disable-gpios   = <&sgpio_out2 29 0 GPIO_ACTIVE_LOW>;
+               rate-select0-gpios = <&sgpio_out2 29 1 GPIO_ACTIVE_HIGH>;
+               los-gpios          = <&sgpio_in2 29 0 GPIO_ACTIVE_HIGH>;
+               mod-def0-gpios     = <&sgpio_in2 29 1 GPIO_ACTIVE_LOW>;
+               tx-fault-gpios     = <&sgpio_in2 29 2 GPIO_ACTIVE_HIGH>;
+       };
+       sfp_eth62: sfp-eth62 {
+               compatible         = "sff,sfp";
+               i2c-bus            = <&i2c_sfp3>;
+               tx-disable-gpios   = <&sgpio_out2 30 0 GPIO_ACTIVE_LOW>;
+               rate-select0-gpios = <&sgpio_out2 30 1 GPIO_ACTIVE_HIGH>;
+               los-gpios          = <&sgpio_in2 30 0 GPIO_ACTIVE_HIGH>;
+               mod-def0-gpios     = <&sgpio_in2 30 1 GPIO_ACTIVE_LOW>;
+               tx-fault-gpios     = <&sgpio_in2 30 2 GPIO_ACTIVE_HIGH>;
+       };
+       sfp_eth63: sfp-eth63 {
+               compatible         = "sff,sfp";
+               i2c-bus            = <&i2c_sfp4>;
+               tx-disable-gpios   = <&sgpio_out2 31 0 GPIO_ACTIVE_LOW>;
+               rate-select0-gpios = <&sgpio_out2 31 1 GPIO_ACTIVE_HIGH>;
+               los-gpios          = <&sgpio_in2 31 0 GPIO_ACTIVE_HIGH>;
+               mod-def0-gpios     = <&sgpio_in2 31 1 GPIO_ACTIVE_LOW>;
+               tx-fault-gpios     = <&sgpio_in2 31 2 GPIO_ACTIVE_HIGH>;
+       };
+};
+
+&mdio0 {
+       status = "okay";
+       phy0: ethernet-phy@0 {
+               reg = <0>;
+       };
+       phy1: ethernet-phy@1 {
+               reg = <1>;
+       };
+       phy2: ethernet-phy@2 {
+               reg = <2>;
+       };
+       phy3: ethernet-phy@3 {
+               reg = <3>;
+       };
+       phy4: ethernet-phy@4 {
+               reg = <4>;
+       };
+       phy5: ethernet-phy@5 {
+               reg = <5>;
+       };
+       phy6: ethernet-phy@6 {
+               reg = <6>;
+       };
+       phy7: ethernet-phy@7 {
+               reg = <7>;
+       };
+       phy8: ethernet-phy@8 {
+               reg = <8>;
+       };
+       phy9: ethernet-phy@9 {
+               reg = <9>;
+       };
+       phy10: ethernet-phy@10 {
+               reg = <10>;
+       };
+       phy11: ethernet-phy@11 {
+               reg = <11>;
+       };
+       phy12: ethernet-phy@12 {
+               reg = <12>;
+       };
+       phy13: ethernet-phy@13 {
+               reg = <13>;
+       };
+       phy14: ethernet-phy@14 {
+               reg = <14>;
+       };
+       phy15: ethernet-phy@15 {
+               reg = <15>;
+       };
+       phy16: ethernet-phy@16 {
+               reg = <16>;
+       };
+       phy17: ethernet-phy@17 {
+               reg = <17>;
+       };
+       phy18: ethernet-phy@18 {
+               reg = <18>;
+       };
+       phy19: ethernet-phy@19 {
+               reg = <19>;
+       };
+       phy20: ethernet-phy@20 {
+               reg = <20>;
+       };
+       phy21: ethernet-phy@21 {
+               reg = <21>;
+       };
+       phy22: ethernet-phy@22 {
+               reg = <22>;
+       };
+       phy23: ethernet-phy@23 {
+               reg = <23>;
+       };
+};
+
+&mdio1 {
+       status = "okay";
+       phy24: ethernet-phy@24 {
+               reg = <0>;
+       };
+       phy25: ethernet-phy@25 {
+               reg = <1>;
+       };
+       phy26: ethernet-phy@26 {
+               reg = <2>;
+       };
+       phy27: ethernet-phy@27 {
+               reg = <3>;
+       };
+       phy28: ethernet-phy@28 {
+               reg = <4>;
+       };
+       phy29: ethernet-phy@29 {
+               reg = <5>;
+       };
+       phy30: ethernet-phy@30 {
+               reg = <6>;
+       };
+       phy31: ethernet-phy@31 {
+               reg = <7>;
+       };
+       phy32: ethernet-phy@32 {
+               reg = <8>;
+       };
+       phy33: ethernet-phy@33 {
+               reg = <9>;
+       };
+       phy34: ethernet-phy@34 {
+               reg = <10>;
+       };
+       phy35: ethernet-phy@35 {
+               reg = <11>;
+       };
+       phy36: ethernet-phy@36 {
+               reg = <12>;
+       };
+       phy37: ethernet-phy@37 {
+               reg = <13>;
+       };
+       phy38: ethernet-phy@38 {
+               reg = <14>;
+       };
+       phy39: ethernet-phy@39 {
+               reg = <15>;
+       };
+       phy40: ethernet-phy@40 {
+               reg = <16>;
+       };
+       phy41: ethernet-phy@41 {
+               reg = <17>;
+       };
+       phy42: ethernet-phy@42 {
+               reg = <18>;
+       };
+       phy43: ethernet-phy@43 {
+               reg = <19>;
+       };
+       phy44: ethernet-phy@44 {
+               reg = <20>;
+       };
+       phy45: ethernet-phy@45 {
+               reg = <21>;
+       };
+       phy46: ethernet-phy@46 {
+               reg = <22>;
+       };
+       phy47: ethernet-phy@47 {
+               reg = <23>;
+       };
+};
+
+&mdio3 {
+       status = "okay";
+       phy64: ethernet-phy@64 {
+               reg = <28>;
+       };
+};
+
+&switch {
+       ethernet-ports {
+               #address-cells = <1>;
+               #size-cells = <0>;
+
+               port0: port@0 {
+                       reg = <0>;
+                       microchip,bandwidth = <1000>;
+                       phys = <&serdes 13>;
+                       phy-handle = <&phy0>;
+                       phy-mode = "qsgmii";
+               };
+               port1: port@1 {
+                       reg = <1>;
+                       microchip,bandwidth = <1000>;
+                       phys = <&serdes 13>;
+                       phy-handle = <&phy1>;
+                       phy-mode = "qsgmii";
+               };
+               port2: port@2 {
+                       reg = <2>;
+                       microchip,bandwidth = <1000>;
+                       phys = <&serdes 13>;
+                       phy-handle = <&phy2>;
+                       phy-mode = "qsgmii";
+               };
+               port3: port@3 {
+                       reg = <3>;
+                       microchip,bandwidth = <1000>;
+                       phys = <&serdes 13>;
+                       phy-handle = <&phy3>;
+                       phy-mode = "qsgmii";
+               };
+               port4: port@4 {
+                       reg = <4>;
+                       microchip,bandwidth = <1000>;
+                       phys = <&serdes 14>;
+                       phy-handle = <&phy4>;
+                       phy-mode = "qsgmii";
+               };
+               port5: port@5 {
+                       reg = <5>;
+                       microchip,bandwidth = <1000>;
+                       phys = <&serdes 14>;
+                       phy-handle = <&phy5>;
+                       phy-mode = "qsgmii";
+               };
+               port6: port@6 {
+                       reg = <6>;
+                       microchip,bandwidth = <1000>;
+                       phys = <&serdes 14>;
+                       phy-handle = <&phy6>;
+                       phy-mode = "qsgmii";
+               };
+               port7: port@7 {
+                       reg = <7>;
+                       microchip,bandwidth = <1000>;
+                       phys = <&serdes 14>;
+                       phy-handle = <&phy7>;
+                       phy-mode = "qsgmii";
+               };
+               port8: port@8 {
+                       reg = <8>;
+                       microchip,bandwidth = <1000>;
+                       phys = <&serdes 15>;
+                       phy-handle = <&phy8>;
+                       phy-mode = "qsgmii";
+               };
+               port9: port@9 {
+                       reg = <9>;
+                       microchip,bandwidth = <1000>;
+                       phys = <&serdes 15>;
+                       phy-handle = <&phy9>;
+                       phy-mode = "qsgmii";
+               };
+               port10: port@10 {
+                       reg = <10>;
+                       microchip,bandwidth = <1000>;
+                       phys = <&serdes 15>;
+                       phy-handle = <&phy10>;
+                       phy-mode = "qsgmii";
+               };
+               port11: port@11 {
+                       reg = <11>;
+                       microchip,bandwidth = <1000>;
+                       phys = <&serdes 15>;
+                       phy-handle = <&phy11>;
+                       phy-mode = "qsgmii";
+               };
+               port12: port@12 {
+                       reg = <12>;
+                       microchip,bandwidth = <1000>;
+                       phys = <&serdes 16>;
+                       phy-handle = <&phy12>;
+                       phy-mode = "qsgmii";
+               };
+               port13: port@13 {
+                       reg = <13>;
+                       microchip,bandwidth = <1000>;
+                       phys = <&serdes 16>;
+                       phy-handle = <&phy13>;
+                       phy-mode = "qsgmii";
+               };
+               port14: port@14 {
+                       reg = <14>;
+                       microchip,bandwidth = <1000>;
+                       phys = <&serdes 16>;
+                       phy-handle = <&phy14>;
+                       phy-mode = "qsgmii";
+               };
+               port15: port@15 {
+                       reg = <15>;
+                       microchip,bandwidth = <1000>;
+                       phys = <&serdes 16>;
+                       phy-handle = <&phy15>;
+                       phy-mode = "qsgmii";
+               };
+               port16: port@16 {
+                       reg = <16>;
+                       microchip,bandwidth = <1000>;
+                       phys = <&serdes 17>;
+                       phy-handle = <&phy16>;
+                       phy-mode = "qsgmii";
+               };
+               port17: port@17 {
+                       reg = <17>;
+                       microchip,bandwidth = <1000>;
+                       phys = <&serdes 17>;
+                       phy-handle = <&phy17>;
+                       phy-mode = "qsgmii";
+               };
+               port18: port@18 {
+                       reg = <18>;
+                       microchip,bandwidth = <1000>;
+                       phys = <&serdes 17>;
+                       phy-handle = <&phy18>;
+                       phy-mode = "qsgmii";
+               };
+               port19: port@19 {
+                       reg = <19>;
+                       microchip,bandwidth = <1000>;
+                       phys = <&serdes 17>;
+                       phy-handle = <&phy19>;
+                       phy-mode = "qsgmii";
+               };
+               port20: port@20 {
+                       reg = <20>;
+                       microchip,bandwidth = <1000>;
+                       phys = <&serdes 18>;
+                       phy-handle = <&phy20>;
+                       phy-mode = "qsgmii";
+               };
+               port21: port@21 {
+                       reg = <21>;
+                       microchip,bandwidth = <1000>;
+                       phys = <&serdes 18>;
+                       phy-handle = <&phy21>;
+                       phy-mode = "qsgmii";
+               };
+               port22: port@22 {
+                       reg = <22>;
+                       microchip,bandwidth = <1000>;
+                       phys = <&serdes 18>;
+                       phy-handle = <&phy22>;
+                       phy-mode = "qsgmii";
+               };
+               port23: port@23 {
+                       reg = <23>;
+                       microchip,bandwidth = <1000>;
+                       phys = <&serdes 18>;
+                       phy-handle = <&phy23>;
+                       phy-mode = "qsgmii";
+               };
+               port24: port@24 {
+                       reg = <24>;
+                       microchip,bandwidth = <1000>;
+                       phys = <&serdes 19>;
+                       phy-handle = <&phy24>;
+                       phy-mode = "qsgmii";
+               };
+               port25: port@25 {
+                       reg = <25>;
+                       microchip,bandwidth = <1000>;
+                       phys = <&serdes 19>;
+                       phy-handle = <&phy25>;
+                       phy-mode = "qsgmii";
+               };
+               port26: port@26 {
+                       reg = <26>;
+                       microchip,bandwidth = <1000>;
+                       phys = <&serdes 19>;
+                       phy-handle = <&phy26>;
+                       phy-mode = "qsgmii";
+               };
+               port27: port@27 {
+                       reg = <27>;
+                       microchip,bandwidth = <1000>;
+                       phys = <&serdes 19>;
+                       phy-handle = <&phy27>;
+                       phy-mode = "qsgmii";
+               };
+               port28: port@28 {
+                       reg = <28>;
+                       microchip,bandwidth = <1000>;
+                       phys = <&serdes 20>;
+                       phy-handle = <&phy28>;
+                       phy-mode = "qsgmii";
+               };
+               port29: port@29 {
+                       reg = <29>;
+                       microchip,bandwidth = <1000>;
+                       phys = <&serdes 20>;
+                       phy-handle = <&phy29>;
+                       phy-mode = "qsgmii";
+               };
+               port30: port@30 {
+                       reg = <30>;
+                       microchip,bandwidth = <1000>;
+                       phys = <&serdes 20>;
+                       phy-handle = <&phy30>;
+                       phy-mode = "qsgmii";
+               };
+               port31: port@31 {
+                       reg = <31>;
+                       microchip,bandwidth = <1000>;
+                       phys = <&serdes 20>;
+                       phy-handle = <&phy31>;
+                       phy-mode = "qsgmii";
+               };
+               port32: port@32 {
+                       reg = <32>;
+                       microchip,bandwidth = <1000>;
+                       phys = <&serdes 21>;
+                       phy-handle = <&phy32>;
+                       phy-mode = "qsgmii";
+               };
+               port33: port@33 {
+                       reg = <33>;
+                       microchip,bandwidth = <1000>;
+                       phys = <&serdes 21>;
+                       phy-handle = <&phy33>;
+                       phy-mode = "qsgmii";
+               };
+               port34: port@34 {
+                       reg = <34>;
+                       microchip,bandwidth = <1000>;
+                       phys = <&serdes 21>;
+                       phy-handle = <&phy34>;
+                       phy-mode = "qsgmii";
+               };
+               port35: port@35 {
+                       reg = <35>;
+                       microchip,bandwidth = <1000>;
+                       phys = <&serdes 21>;
+                       phy-handle = <&phy35>;
+                       phy-mode = "qsgmii";
+               };
+               port36: port@36 {
+                       reg = <36>;
+                       microchip,bandwidth = <1000>;
+                       phys = <&serdes 22>;
+                       phy-handle = <&phy36>;
+                       phy-mode = "qsgmii";
+               };
+               port37: port@37 {
+                       reg = <37>;
+                       microchip,bandwidth = <1000>;
+                       phys = <&serdes 22>;
+                       phy-handle = <&phy37>;
+                       phy-mode = "qsgmii";
+               };
+               port38: port@38 {
+                       reg = <38>;
+                       microchip,bandwidth = <1000>;
+                       phys = <&serdes 22>;
+                       phy-handle = <&phy38>;
+                       phy-mode = "qsgmii";
+               };
+               port39: port@39 {
+                       reg = <39>;
+                       microchip,bandwidth = <1000>;
+                       phys = <&serdes 22>;
+                       phy-handle = <&phy39>;
+                       phy-mode = "qsgmii";
+               };
+               port40: port@40 {
+                       reg = <40>;
+                       microchip,bandwidth = <1000>;
+                       phys = <&serdes 23>;
+                       phy-handle = <&phy40>;
+                       phy-mode = "qsgmii";
+               };
+               port41: port@41 {
+                       reg = <41>;
+                       microchip,bandwidth = <1000>;
+                       phys = <&serdes 23>;
+                       phy-handle = <&phy41>;
+                       phy-mode = "qsgmii";
+               };
+               port42: port@42 {
+                       reg = <42>;
+                       microchip,bandwidth = <1000>;
+                       phys = <&serdes 23>;
+                       phy-handle = <&phy42>;
+                       phy-mode = "qsgmii";
+               };
+               port43: port@43 {
+                       reg = <43>;
+                       microchip,bandwidth = <1000>;
+                       phys = <&serdes 23>;
+                       phy-handle = <&phy43>;
+                       phy-mode = "qsgmii";
+               };
+               port44: port@44 {
+                       reg = <44>;
+                       microchip,bandwidth = <1000>;
+                       phys = <&serdes 24>;
+                       phy-handle = <&phy44>;
+                       phy-mode = "qsgmii";
+               };
+               port45: port@45 {
+                       reg = <45>;
+                       microchip,bandwidth = <1000>;
+                       phys = <&serdes 24>;
+                       phy-handle = <&phy45>;
+                       phy-mode = "qsgmii";
+               };
+               port46: port@46 {
+                       reg = <46>;
+                       microchip,bandwidth = <1000>;
+                       phys = <&serdes 24>;
+                       phy-handle = <&phy46>;
+                       phy-mode = "qsgmii";
+               };
+               port47: port@47 {
+                       reg = <47>;
+                       microchip,bandwidth = <1000>;
+                       phys = <&serdes 24>;
+                       phy-handle = <&phy47>;
+                       phy-mode = "qsgmii";
+               };
+               /* Then the 25G interfaces */
+               port60: port@60 {
+                       reg = <60>;
+                       microchip,bandwidth = <25000>;
+                       phys = <&serdes 29>;
+                       phy-mode = "10gbase-r";
+                       sfp = <&sfp_eth60>;
+                       managed = "in-band-status";
+               };
+               port61: port@61 {
+                       reg = <61>;
+                       microchip,bandwidth = <25000>;
+                       phys = <&serdes 30>;
+                       phy-mode = "10gbase-r";
+                       sfp = <&sfp_eth61>;
+                       managed = "in-band-status";
+               };
+               port62: port@62 {
+                       reg = <62>;
+                       microchip,bandwidth = <25000>;
+                       phys = <&serdes 31>;
+                       phy-mode = "10gbase-r";
+                       sfp = <&sfp_eth62>;
+                       managed = "in-band-status";
+               };
+               port63: port@63 {
+                       reg = <63>;
+                       microchip,bandwidth = <25000>;
+                       phys = <&serdes 32>;
+                       phy-mode = "10gbase-r";
+                       sfp = <&sfp_eth63>;
+                       managed = "in-band-status";
+               };
+               /* Finally the Management interface */
+               port64: port@64 {
+                       reg = <64>;
+                       microchip,bandwidth = <1000>;
+                       phys = <&serdes 0>;
+                       phy-handle = <&phy64>;
+                       phy-mode = "sgmii";
+               };
+       };
+};
index 2d94996..cdf404a 100644 (file)
 #define SO_PREFER_BUSY_POLL    69
 #define SO_BUSY_POLL_BUDGET    70
 
+#define SO_NETNS_COOKIE                71
+
 #if !defined(__KERNEL__)
 
 #if __BITS_PER_LONG == 64
index f609043..5b5351c 100644 (file)
 #define SO_PREFER_BUSY_POLL    0x4043
 #define SO_BUSY_POLL_BUDGET    0x4044
 
+#define SO_NETNS_COOKIE                0x4045
+
 #if !defined(__KERNEL__)
 
 #if __BITS_PER_LONG == 64
index 848a22f..92675dc 100644 (file)
 #define SO_PREFER_BUSY_POLL     0x0048
 #define SO_BUSY_POLL_BUDGET     0x0049
 
+#define SO_NETNS_COOKIE          0x0050
+
 #if !defined(__KERNEL__)
 
 
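The three hunks above add the same socket option with per-arch values. A minimal userspace sketch of querying it, assuming the asm-generic value of 71 (the option reads back the socket's network-namespace cookie as a u64):

    /* sketch: read the netns cookie bound to a socket */
    #include <stdint.h>
    #include <stdio.h>
    #include <sys/socket.h>

    #ifndef SO_NETNS_COOKIE
    #define SO_NETNS_COOKIE 71      /* asm-generic value; arch headers differ */
    #endif

    int main(void)
    {
            int fd = socket(AF_INET, SOCK_DGRAM, 0);
            uint64_t cookie = 0;
            socklen_t len = sizeof(cookie);

            if (fd >= 0 &&
                getsockopt(fd, SOL_SOCKET, SO_NETNS_COOKIE, &cookie, &len) == 0)
                    printf("netns cookie: %llu\n", (unsigned long long)cookie);
            return 0;
    }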
index edfad93..a7ee0af 100644 (file)
@@ -133,6 +133,7 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
        skb->dev = bareudp->dev;
        oiph = skb_network_header(skb);
        skb_reset_network_header(skb);
+       skb_reset_mac_header(skb);
 
        if (!IS_ENABLED(CONFIG_IPV6) || family == AF_INET)
                err = IP_ECN_decapsulate(oiph, skb);
index 8383cd6..b29d41e 100644 (file)
@@ -7,8 +7,8 @@ tristate "NXP SJA1105 Ethernet switch family support"
        select PACKING
        select CRC32
        help
-         This is the driver for the NXP SJA1105 automotive Ethernet switch
-         family. These are 5-port devices and are managed over an SPI
+         This is the driver for the NXP SJA1105 (5-port) and SJA1110 (10-port)
+         automotive Ethernet switch family. These are managed over an SPI
          interface. Probing is handled based on OF bindings and so is the
          linkage to PHYLINK. The driver supports the following revisions:
            - SJA1105E (Gen. 1, No TT-Ethernet)
@@ -17,6 +17,10 @@ tristate "NXP SJA1105 Ethernet switch family support"
            - SJA1105Q (Gen. 2, No SGMII, TT-Ethernet)
            - SJA1105R (Gen. 2, SGMII, No TT-Ethernet)
            - SJA1105S (Gen. 2, SGMII, TT-Ethernet)
+           - SJA1110A (Gen. 3, SGMII, TT-Ethernet, 100base-TX PHY, 10 ports)
+           - SJA1110B (Gen. 3, SGMII, TT-Ethernet, 100base-TX PHY, 9 ports)
+           - SJA1110C (Gen. 3, SGMII, TT-Ethernet, 100base-TX PHY, 7 ports)
+           - SJA1110D (Gen. 3, SGMII, TT-Ethernet, no 100base-TX PHY, 7 ports)
 
 config NET_DSA_SJA1105_PTP
        bool "Support for the PTP clock on the NXP SJA1105 Ethernet switch"
index fcca023..41f7f07 100644 (file)
@@ -4296,3 +4296,4 @@ MODULE_AUTHOR("Broadcom Corporation");
 MODULE_DESCRIPTION("Broadcom GENET Ethernet controller driver");
 MODULE_ALIAS("platform:bcmgenet");
 MODULE_LICENSE("GPL");
+MODULE_SOFTDEP("pre: mdio-bcm-unimac");
index 92a3905..2d1abdd 100644 (file)
@@ -67,7 +67,9 @@ config FSL_PQ_MDIO
 
 config FSL_XGMAC_MDIO
        tristate "Freescale XGMAC MDIO"
-       depends on FWNODE_MDIO
+       select PHYLIB
+       depends on OF
+       select OF_MDIO
        help
          This driver supports the MDIO bus on the Fman 10G Ethernet MACs, and
          on the FMan mEMAC (which supports both Clauses 22 and 45)
index 2d99edc..0b68852 100644 (file)
@@ -13,7 +13,7 @@
  */
 
 #include <linux/acpi.h>
-#include <linux/fwnode_mdio.h>
+#include <linux/acpi_mdio.h>
 #include <linux/interrupt.h>
 #include <linux/kernel.h>
 #include <linux/mdio.h>
@@ -246,6 +246,7 @@ static int xgmac_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
 
 static int xgmac_mdio_probe(struct platform_device *pdev)
 {
+       struct fwnode_handle *fwnode;
        struct mdio_fsl_priv *priv;
        struct resource *res;
        struct mii_bus *bus;
@@ -290,7 +291,13 @@ static int xgmac_mdio_probe(struct platform_device *pdev)
        priv->has_a011043 = device_property_read_bool(&pdev->dev,
                                                      "fsl,erratum-a011043");
 
-       ret = fwnode_mdiobus_register(bus, pdev->dev.fwnode);
+       fwnode = pdev->dev.fwnode;
+       if (is_of_node(fwnode))
+               ret = of_mdiobus_register(bus, to_of_node(fwnode));
+       else if (is_acpi_node(fwnode))
+               ret = acpi_mdiobus_register(bus, fwnode);
+       else
+               ret = -EINVAL;
        if (ret) {
                dev_err(&pdev->dev, "cannot register MDIO bus\n");
                goto err_registration;
index b8f04d0..8641a00 100644 (file)
@@ -17,7 +17,7 @@ if NET_VENDOR_GOOGLE
 
 config GVE
        tristate "Google Virtual NIC (gVNIC) support"
-       depends on PCI_MSI
+       depends on (PCI_MSI && (X86 || CPU_LITTLE_ENDIAN))
        help
          This driver supports Google Virtual NIC (gVNIC).
 
index 3354ce4..b9a6be7 100644 (file)
@@ -1,4 +1,4 @@
 # Makefile for the Google virtual Ethernet (gve) driver
 
 obj-$(CONFIG_GVE) += gve.o
-gve-objs := gve_main.o gve_tx.o gve_rx.o gve_ethtool.o gve_adminq.o
+gve-objs := gve_main.o gve_tx.o gve_tx_dqo.o gve_rx.o gve_rx_dqo.o gve_ethtool.o gve_adminq.o gve_utils.o
index daf07c0..1d3188e 100644 (file)
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: (GPL-2.0 OR MIT)
  * Google virtual Ethernet (gve) driver
  *
- * Copyright (C) 2015-2019 Google, Inc.
+ * Copyright (C) 2015-2021 Google, Inc.
  */
 
 #ifndef _GVE_H_
@@ -11,7 +11,9 @@
 #include <linux/netdevice.h>
 #include <linux/pci.h>
 #include <linux/u64_stats_sync.h>
+
 #include "gve_desc.h"
+#include "gve_desc_dqo.h"
 
 #ifndef PCI_VENDOR_ID_GOOGLE
 #define PCI_VENDOR_ID_GOOGLE   0x1ae0
 
 #define GVE_DATA_SLOT_ADDR_PAGE_MASK (~(PAGE_SIZE - 1))
 
+/* PTYPEs are always 10 bits. */
+#define GVE_NUM_PTYPES 1024
+
+#define GVE_RX_BUFFER_SIZE_DQO 2048
+
 /* Each slot in the desc ring has a 1:1 mapping to a slot in the data ring */
 struct gve_rx_desc_queue {
        struct gve_rx_desc *desc_ring; /* the descriptor ring */
@@ -51,7 +58,8 @@ struct gve_rx_desc_queue {
 struct gve_rx_slot_page_info {
        struct page *page;
        void *page_address;
-       u8 page_offset; /* flipped to second half? */
+       u32 page_offset; /* offset to write to in page */
+       int pagecnt_bias; /* expected pagecnt if only the driver has a ref */
        u8 can_flip;
 };
 
@@ -76,17 +84,117 @@ struct gve_rx_data_queue {
 
 struct gve_priv;
 
-/* An RX ring that contains a power-of-two sized desc and data ring. */
+/* RX buffer queue for posting buffers to HW.
+ * Each RX (completion) queue has a corresponding buffer queue.
+ */
+struct gve_rx_buf_queue_dqo {
+       struct gve_rx_desc_dqo *desc_ring;
+       dma_addr_t bus;
+       u32 head; /* Pointer to start cleaning buffers at. */
+       u32 tail; /* Last posted buffer index + 1 */
+       u32 mask; /* Mask for indices to the size of the ring */
+};
+
+/* RX completion queue to receive packets from HW. */
+struct gve_rx_compl_queue_dqo {
+       struct gve_rx_compl_desc_dqo *desc_ring;
+       dma_addr_t bus;
+
+       /* Number of slots which did not have a buffer posted yet. We should not
+        * post more buffers than the queue size to avoid HW overrunning the
+        * queue.
+        */
+       int num_free_slots;
+
+       /* HW uses a "generation bit" to notify SW of new descriptors. When a
+        * descriptor's generation bit is different from the current generation,
+        * that descriptor is ready to be consumed by SW.
+        */
+       u8 cur_gen_bit;
+
+       /* Pointer into desc_ring where the next completion descriptor will be
+        * received.
+        */
+       u32 head;
+       u32 mask; /* Mask for indices to the size of the ring */
+};
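A hedged sketch of how the generation-bit scheme just described can be consumed; only head, mask and cur_gen_bit come from the struct above, while the descriptor's `generation` field and the process_desc() hook are illustrative assumptions:

    /* sketch: drain completions until the gen bit stops differing */
    static void process_desc(struct gve_rx_compl_desc_dqo *desc); /* hypothetical */

    static void drain_complq(struct gve_rx_compl_queue_dqo *q)
    {
            /* per the comment above, a descriptor is ready when its gen
             * bit differs from the current generation
             */
            while (q->desc_ring[q->head].generation != q->cur_gen_bit) {
                    process_desc(&q->desc_ring[q->head]);
                    q->head = (q->head + 1) & q->mask;
                    if (q->head == 0)       /* wrapped: HW writes the opposite
                                             * gen bit on its next pass */
                            q->cur_gen_bit ^= 1;
            }
    }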
+
+/* Stores state for tracking buffers posted to HW */
+struct gve_rx_buf_state_dqo {
+       /* The page posted to HW. */
+       struct gve_rx_slot_page_info page_info;
+
+       /* The DMA address corresponding to `page_info`. */
+       dma_addr_t addr;
+
+       /* Last offset into the page when it only had a single reference, at
+        * which point every other offset is free to be reused.
+        */
+       u32 last_single_ref_offset;
+
+       /* Linked list index to next element in the list, or -1 if none */
+       s16 next;
+};
+
+/* `head` and `tail` are indices into an array, or -1 if empty. */
+struct gve_index_list {
+       s16 head;
+       s16 tail;
+};
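These s16 lists link elements that live in a flat array rather than through pointers. A minimal sketch of the push/pop operations implied by the head/tail/-1 convention, reusing the `next` field of gve_rx_buf_state_dqo (helper names are illustrative):

    /* sketch: FIFO operations over index-linked elements */
    static void index_list_push_tail(struct gve_index_list *list,
                                     struct gve_rx_buf_state_dqo *states, s16 idx)
    {
            states[idx].next = -1;
            if (list->tail == -1)
                    list->head = idx;       /* list was empty */
            else
                    states[list->tail].next = idx;
            list->tail = idx;
    }

    static s16 index_list_pop_head(struct gve_index_list *list,
                                   struct gve_rx_buf_state_dqo *states)
    {
            s16 idx = list->head;

            if (idx != -1) {
                    list->head = states[idx].next;
                    if (list->head == -1)   /* list is now empty */
                            list->tail = -1;
            }
            return idx;
    }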
+
+/* Contains datapath state used to represent an RX queue. */
 struct gve_rx_ring {
        struct gve_priv *gve;
-       struct gve_rx_desc_queue desc;
-       struct gve_rx_data_queue data;
+       union {
+               /* GQI fields */
+               struct {
+                       struct gve_rx_desc_queue desc;
+                       struct gve_rx_data_queue data;
+
+                       /* threshold for posting new buffs and descs */
+                       u32 db_threshold;
+               };
+
+               /* DQO fields. */
+               struct {
+                       struct gve_rx_buf_queue_dqo bufq;
+                       struct gve_rx_compl_queue_dqo complq;
+
+                       struct gve_rx_buf_state_dqo *buf_states;
+                       u16 num_buf_states;
+
+                       /* Linked list of gve_rx_buf_state_dqo. Index into
+                        * buf_states, or -1 if empty.
+                        */
+                       s16 free_buf_states;
+
+                       /* Linked list of gve_rx_buf_state_dqo. Indexes into
+                        * buf_states, or -1 if empty.
+                        *
+                        * This list contains buf_states which are pointing to
+                        * valid buffers.
+                        *
+                        * We use a FIFO here in order to increase the
+                        * probability that buffers can be reused by increasing
+                        * the time between usages.
+                        */
+                       struct gve_index_list recycled_buf_states;
+
+                       /* Linked list of gve_rx_buf_state_dqo. Indexes into
+                        * buf_states, or -1 if empty.
+                        *
+                        * This list contains buf_states which have buffers
+                        * which cannot be reused yet.
+                        */
+                       struct gve_index_list used_buf_states;
+               } dqo;
+       };
+
        u64 rbytes; /* free-running bytes received */
        u64 rpackets; /* free-running packets received */
        u32 cnt; /* free-running total number of completed packets */
        u32 fill_cnt; /* free-running total number of descs and buffs posted */
        u32 mask; /* masks the cnt and fill_cnt to the size of the ring */
-       u32 db_threshold; /* threshold for posting new buffs and descs */
        u64 rx_copybreak_pkt; /* free-running count of copybreak packets */
        u64 rx_copied_pkt; /* free-running total number of copied packets */
        u64 rx_skb_alloc_fail; /* free-running count of skb alloc fails */
@@ -97,6 +205,10 @@ struct gve_rx_ring {
        struct gve_queue_resources *q_resources; /* head and tail pointer idx */
        dma_addr_t q_resources_bus; /* dma address for the queue resources */
        struct u64_stats_sync statss; /* sync stats for 32bit archs */
+
+       /* head and tail of skb chain for the current packet or NULL if none */
+       struct sk_buff *skb_head;
+       struct sk_buff *skb_tail;
 };
 
 /* A TX desc ring entry */
@@ -137,23 +249,161 @@ struct gve_tx_fifo {
        struct gve_queue_page_list *qpl; /* QPL mapped into this FIFO */
 };
 
-/* A TX ring that contains a power-of-two sized desc ring and a FIFO buffer */
+/* TX descriptor for DQO format */
+union gve_tx_desc_dqo {
+       struct gve_tx_pkt_desc_dqo pkt;
+       struct gve_tx_tso_context_desc_dqo tso_ctx;
+       struct gve_tx_general_context_desc_dqo general_ctx;
+};
+
+enum gve_packet_state {
+       /* Packet is in free list, available to be allocated.
+        * This should always be zero since state is not explicitly initialized.
+        */
+       GVE_PACKET_STATE_UNALLOCATED,
+       /* Packet is expecting a regular data completion or miss completion */
+       GVE_PACKET_STATE_PENDING_DATA_COMPL,
+       /* Packet has received a miss completion and is expecting a
+        * re-injection completion.
+        */
+       GVE_PACKET_STATE_PENDING_REINJECT_COMPL,
+       /* No valid completion received within the specified timeout. */
+       GVE_PACKET_STATE_TIMED_OUT_COMPL,
+};
+
+struct gve_tx_pending_packet_dqo {
+       struct sk_buff *skb; /* skb for this packet */
+
+       /* 0th element corresponds to the linear portion of `skb`, should be
+        * unmapped with `dma_unmap_single`.
+        *
+        * All others correspond to `skb`'s frags and should be unmapped with
+        * `dma_unmap_page`.
+        */
+       struct gve_tx_dma_buf bufs[MAX_SKB_FRAGS + 1];
+       u16 num_bufs;
+
+       /* Linked list index to next element in the list, or -1 if none */
+       s16 next;
+
+       /* Linked list index to prev element in the list, or -1 if none.
+        * Used for tracking either outstanding miss completions or prematurely
+        * freed packets.
+        */
+       s16 prev;
+
+       /* Identifies the current state of the packet as defined in
+        * `enum gve_packet_state`.
+        */
+       u8 state;
+
+       /* If packet is an outstanding miss completion, then the packet is
+        * freed if the corresponding re-injection completion is not received
+        * before kernel jiffies exceeds timeout_jiffies.
+        */
+       unsigned long timeout_jiffies;
+};
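A short sketch of how timeout_jiffies is typically armed and checked; the five-second period is illustrative, and time_after() is the standard wrap-safe jiffies comparison:

    #include <linux/jiffies.h>

    /* sketch: arm the re-injection timeout and test it later */
    static void arm_reinject_timeout(struct gve_tx_pending_packet_dqo *pkt)
    {
            pkt->timeout_jiffies = jiffies + 5 * HZ;        /* illustrative */
    }

    static bool reinject_timed_out(const struct gve_tx_pending_packet_dqo *pkt)
    {
            return time_after(jiffies, pkt->timeout_jiffies);
    }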
+
+/* Contains datapath state used to represent a TX queue. */
 struct gve_tx_ring {
        /* Cacheline 0 -- Accessed & dirtied during transmit */
-       struct gve_tx_fifo tx_fifo;
-       u32 req; /* driver tracked head pointer */
-       u32 done; /* driver tracked tail pointer */
+       union {
+               /* GQI fields */
+               struct {
+                       struct gve_tx_fifo tx_fifo;
+                       u32 req; /* driver tracked head pointer */
+                       u32 done; /* driver tracked tail pointer */
+               };
+
+               /* DQO fields. */
+               struct {
+                       /* Linked list of gve_tx_pending_packet_dqo. Index into
+                        * pending_packets, or -1 if empty.
+                        *
+                        * This is a consumer list owned by the TX path. When it
+                        * runs out, the producer list is stolen from the
+                        * completion handling path
+                        * (dqo_compl.free_pending_packets).
+                        */
+                       s16 free_pending_packets;
+
+                       /* Cached value of `dqo_compl.hw_tx_head` */
+                       u32 head;
+                       u32 tail; /* Last posted buffer index + 1 */
+
+                       /* Index of the last descriptor with "report event" bit
+                        * set.
+                        */
+                       u32 last_re_idx;
+               } dqo_tx;
+       };
 
        /* Cacheline 1 -- Accessed & dirtied during gve_clean_tx_done */
-       __be32 last_nic_done ____cacheline_aligned; /* NIC tail pointer */
+       union {
+               /* GQI fields */
+               struct {
+                       /* NIC tail pointer */
+                       __be32 last_nic_done;
+               };
+
+               /* DQO fields. */
+               struct {
+                       u32 head; /* Last read on compl_desc */
+
+                       /* Tracks the current gen bit of compl_q */
+                       u8 cur_gen_bit;
+
+                       /* Linked list of gve_tx_pending_packet_dqo. Index into
+                        * pending_packets, or -1 if empty.
+                        *
+                        * This is the producer list, owned by the completion
+                        * handling path. When the consumer list
+                        * (dqo_tx.free_pending_packets) runs out, this list
+                        * will be stolen.
+                        */
+                       atomic_t free_pending_packets;
+
+                       /* Last TX ring index fetched by HW */
+                       atomic_t hw_tx_head;
+
+                       /* List to track pending packets which received a miss
+                        * completion but not a corresponding reinjection.
+                        */
+                       struct gve_index_list miss_completions;
+
+                       /* List to track pending packets that were completed
+                        * before receiving a valid completion because they
+                        * reached a specified timeout.
+                        */
+                       struct gve_index_list timed_out_completions;
+               } dqo_compl;
+       } ____cacheline_aligned;
        u64 pkt_done; /* free-running - total packets completed */
        u64 bytes_done; /* free-running - total bytes completed */
        u64 dropped_pkt; /* free-running - total packets dropped */
        u64 dma_mapping_error; /* count of dma mapping errors */
 
        /* Cacheline 2 -- Read-mostly fields */
-       union gve_tx_desc *desc ____cacheline_aligned;
-       struct gve_tx_buffer_state *info; /* Maps 1:1 to a desc */
+       union {
+               /* GQI fields */
+               struct {
+                       union gve_tx_desc *desc;
+
+                       /* Maps 1:1 to a desc */
+                       struct gve_tx_buffer_state *info;
+               };
+
+               /* DQO fields. */
+               struct {
+                       union gve_tx_desc_dqo *tx_ring;
+                       struct gve_tx_compl_desc *compl_ring;
+
+                       struct gve_tx_pending_packet_dqo *pending_packets;
+                       s16 num_pending_packets;
+
+                       u32 complq_mask; /* complq size is complq_mask + 1 */
+               } dqo;
+       } ____cacheline_aligned;
        struct netdev_queue *netdev_txq;
        struct gve_queue_resources *q_resources; /* head and tail pointer idx */
        struct device *dev;
@@ -167,6 +417,7 @@ struct gve_tx_ring {
        u32 ntfy_id; /* notification block index */
        dma_addr_t bus; /* dma address of the descr ring */
        dma_addr_t q_resources_bus; /* dma address of the queue resources */
+       dma_addr_t complq_bus_dqo; /* dma address of the dqo.compl_ring */
        struct u64_stats_sync statss; /* sync stats for 32bit archs */
 } ____cacheline_aligned;
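Tying the pieces together, a hedged sketch of how a miss completion might move a pending packet through the states defined earlier; only the state values come from the driver, the handler itself is illustrative:

    /* sketch: react to a miss completion for one pending packet */
    static void on_miss_completion(struct gve_tx_pending_packet_dqo *pkt)
    {
            switch (pkt->state) {
            case GVE_PACKET_STATE_PENDING_DATA_COMPL:
                    /* first completion was a miss: now expect re-injection
                     * (the packet would also be linked into
                     * dqo_compl.miss_completions here)
                     */
                    pkt->state = GVE_PACKET_STATE_PENDING_REINJECT_COMPL;
                    break;
            case GVE_PACKET_STATE_TIMED_OUT_COMPL:
                    /* already reaped by the timeout path */
                    break;
            default:
                    WARN_ON_ONCE(1);
            }
    }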
 
@@ -194,6 +445,31 @@ struct gve_qpl_config {
        unsigned long *qpl_id_map; /* bitmap of used qpl ids */
 };
 
+struct gve_options_dqo_rda {
+       u16 tx_comp_ring_entries; /* number of tx_comp descriptors */
+       u16 rx_buff_ring_entries; /* number of rx_buff descriptors */
+};
+
+struct gve_ptype {
+       u8 l3_type;  /* `gve_l3_type` in gve_adminq.h */
+       u8 l4_type;  /* `gve_l4_type` in gve_adminq.h */
+};
+
+struct gve_ptype_lut {
+       struct gve_ptype ptypes[GVE_NUM_PTYPES];
+};
+
+/* GVE_QUEUE_FORMAT_UNSPECIFIED must be zero since 0 is the default value
+ * when the entire configure_device_resources command is zeroed out and the
+ * queue_format is not specified.
+ */
+enum gve_queue_format {
+       GVE_QUEUE_FORMAT_UNSPECIFIED    = 0x0,
+       GVE_GQI_RDA_FORMAT              = 0x1,
+       GVE_GQI_QPL_FORMAT              = 0x2,
+       GVE_DQO_RDA_FORMAT              = 0x3,
+};
+
 struct gve_priv {
        struct net_device *dev;
        struct gve_tx_ring *tx; /* array of tx_cfg.num_queues */
@@ -216,7 +492,6 @@ struct gve_priv {
        u64 num_registered_pages; /* num pages registered with NIC */
        u32 rx_copybreak; /* copy packets smaller than this */
        u16 default_num_queues; /* default num queues to set up */
-       u8 raw_addressing; /* 1 if this dev supports raw addressing, 0 otherwise */
 
        struct gve_queue_config tx_cfg;
        struct gve_queue_config rx_cfg;
@@ -251,6 +526,7 @@ struct gve_priv {
        u32 adminq_set_driver_parameter_cnt;
        u32 adminq_report_stats_cnt;
        u32 adminq_report_link_speed_cnt;
+       u32 adminq_get_ptype_map_cnt;
 
        /* Global stats */
        u32 interface_up_cnt; /* count of times interface turned up since last reset */
@@ -275,6 +551,14 @@ struct gve_priv {
 
        /* Gvnic device link speed from hypervisor. */
        u64 link_speed;
+
+       struct gve_options_dqo_rda options_dqo_rda;
+       struct gve_ptype_lut *ptype_lut_dqo;
+
+       /* Must be a power of two. */
+       int data_buffer_size_dqo;
+
+       enum gve_queue_format queue_format;
 };
 
 enum gve_service_task_flags_bit {
@@ -454,14 +738,20 @@ static inline u32 gve_rx_idx_to_ntfy(struct gve_priv *priv, u32 queue_idx)
  */
 static inline u32 gve_num_tx_qpls(struct gve_priv *priv)
 {
-       return priv->raw_addressing ? 0 : priv->tx_cfg.num_queues;
+       if (priv->queue_format != GVE_GQI_QPL_FORMAT)
+               return 0;
+
+       return priv->tx_cfg.num_queues;
 }
 
 /* Returns the number of rx queue page lists
  */
 static inline u32 gve_num_rx_qpls(struct gve_priv *priv)
 {
-       return priv->raw_addressing ? 0 : priv->rx_cfg.num_queues;
+       if (priv->queue_format != GVE_GQI_QPL_FORMAT)
+               return 0;
+
+       return priv->rx_cfg.num_queues;
 }
 
 /* Returns a pointer to the next available tx qpl in the list of qpls
@@ -515,6 +805,12 @@ static inline enum dma_data_direction gve_qpl_dma_dir(struct gve_priv *priv,
                return DMA_FROM_DEVICE;
 }
 
+static inline bool gve_is_gqi(struct gve_priv *priv)
+{
+       return priv->queue_format == GVE_GQI_RDA_FORMAT ||
+               priv->queue_format == GVE_GQI_QPL_FORMAT;
+}
+
 /* buffers */
 int gve_alloc_page(struct gve_priv *priv, struct device *dev,
                   struct page **page, dma_addr_t *dma,
@@ -525,14 +821,14 @@ void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
 netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev);
 bool gve_tx_poll(struct gve_notify_block *block, int budget);
 int gve_tx_alloc_rings(struct gve_priv *priv);
-void gve_tx_free_rings(struct gve_priv *priv);
+void gve_tx_free_rings_gqi(struct gve_priv *priv);
 __be32 gve_tx_load_event_counter(struct gve_priv *priv,
                                 struct gve_tx_ring *tx);
 /* rx handling */
 void gve_rx_write_doorbell(struct gve_priv *priv, struct gve_rx_ring *rx);
 bool gve_rx_poll(struct gve_notify_block *block, int budget);
 int gve_rx_alloc_rings(struct gve_priv *priv);
-void gve_rx_free_rings(struct gve_priv *priv);
+void gve_rx_free_rings_gqi(struct gve_priv *priv);
 bool gve_clean_rx_done(struct gve_rx_ring *rx, int budget,
                       netdev_features_t feat);
 /* Reset */
index 53864f2..5bb56b4 100644 (file)
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: (GPL-2.0 OR MIT)
 /* Google virtual Ethernet (gve) driver
  *
- * Copyright (C) 2015-2019 Google, Inc.
+ * Copyright (C) 2015-2021 Google, Inc.
  */
 
 #include <linux/etherdevice.h>
@@ -18,6 +18,8 @@
 "Expected: length=%d, feature_mask=%x.\n" \
 "Actual: length=%d, feature_mask=%x.\n"
 
+#define GVE_DEVICE_OPTION_TOO_BIG_FMT "Length of %s option larger than expected. Possible older version of guest driver.\n"
+
 static
 struct gve_device_option *gve_get_next_option(struct gve_device_descriptor *descriptor,
                                              struct gve_device_option *option)
@@ -33,28 +35,81 @@ struct gve_device_option *gve_get_next_option(struct gve_device_descriptor *desc
 static
 void gve_parse_device_option(struct gve_priv *priv,
                             struct gve_device_descriptor *device_descriptor,
-                            struct gve_device_option *option)
+                            struct gve_device_option *option,
+                            struct gve_device_option_gqi_rda **dev_op_gqi_rda,
+                            struct gve_device_option_gqi_qpl **dev_op_gqi_qpl,
+                            struct gve_device_option_dqo_rda **dev_op_dqo_rda)
 {
+       u32 req_feat_mask = be32_to_cpu(option->required_features_mask);
        u16 option_length = be16_to_cpu(option->option_length);
        u16 option_id = be16_to_cpu(option->option_id);
 
+       /* If the length or feature mask doesn't match, continue without
+        * enabling the feature.
+        */
        switch (option_id) {
-       case GVE_DEV_OPT_ID_RAW_ADDRESSING:
-               /* If the length or feature mask doesn't match,
-                * continue without enabling the feature.
-                */
-               if (option_length != GVE_DEV_OPT_LEN_RAW_ADDRESSING ||
-                   option->feat_mask != cpu_to_be32(GVE_DEV_OPT_FEAT_MASK_RAW_ADDRESSING)) {
-                       dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT, "Raw Addressing",
-                                GVE_DEV_OPT_LEN_RAW_ADDRESSING,
-                                cpu_to_be32(GVE_DEV_OPT_FEAT_MASK_RAW_ADDRESSING),
-                                option_length, option->feat_mask);
-                       priv->raw_addressing = 0;
-               } else {
-                       dev_info(&priv->pdev->dev,
-                                "Raw addressing device option enabled.\n");
-                       priv->raw_addressing = 1;
+       case GVE_DEV_OPT_ID_GQI_RAW_ADDRESSING:
+               if (option_length != GVE_DEV_OPT_LEN_GQI_RAW_ADDRESSING ||
+                   req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RAW_ADDRESSING) {
+                       dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
+                                "Raw Addressing",
+                                GVE_DEV_OPT_LEN_GQI_RAW_ADDRESSING,
+                                GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RAW_ADDRESSING,
+                                option_length, req_feat_mask);
+                       break;
+               }
+
+               dev_info(&priv->pdev->dev,
+                        "Gqi raw addressing device option enabled.\n");
+               priv->queue_format = GVE_GQI_RDA_FORMAT;
+               break;
+       case GVE_DEV_OPT_ID_GQI_RDA:
+               if (option_length < sizeof(**dev_op_gqi_rda) ||
+                   req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RDA) {
+                       dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
+                                "GQI RDA", (int)sizeof(**dev_op_gqi_rda),
+                                GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RDA,
+                                option_length, req_feat_mask);
+                       break;
+               }
+
+               if (option_length > sizeof(**dev_op_gqi_rda)) {
+                       dev_warn(&priv->pdev->dev,
+                                GVE_DEVICE_OPTION_TOO_BIG_FMT, "GQI RDA");
+               }
+               *dev_op_gqi_rda = (void *)(option + 1);
+               break;
+       case GVE_DEV_OPT_ID_GQI_QPL:
+               if (option_length < sizeof(**dev_op_gqi_qpl) ||
+                   req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_GQI_QPL) {
+                       dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
+                                "GQI QPL", (int)sizeof(**dev_op_gqi_qpl),
+                                GVE_DEV_OPT_REQ_FEAT_MASK_GQI_QPL,
+                                option_length, req_feat_mask);
+                       break;
+               }
+
+               if (option_length > sizeof(**dev_op_gqi_qpl)) {
+                       dev_warn(&priv->pdev->dev,
+                                GVE_DEVICE_OPTION_TOO_BIG_FMT, "GQI QPL");
+               }
+               *dev_op_gqi_qpl = (void *)(option + 1);
+               break;
+       case GVE_DEV_OPT_ID_DQO_RDA:
+               if (option_length < sizeof(**dev_op_dqo_rda) ||
+                   req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_DQO_RDA) {
+                       dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
+                                "DQO RDA", (int)sizeof(**dev_op_dqo_rda),
+                                GVE_DEV_OPT_REQ_FEAT_MASK_DQO_RDA,
+                                option_length, req_feat_mask);
+                       break;
+               }
+
+               if (option_length > sizeof(**dev_op_dqo_rda)) {
+                       dev_warn(&priv->pdev->dev,
+                                GVE_DEVICE_OPTION_TOO_BIG_FMT, "DQO RDA");
                }
+               *dev_op_dqo_rda = (void *)(option + 1);
                break;
        default:
                /* If we don't recognize the option just continue
@@ -65,6 +120,39 @@ void gve_parse_device_option(struct gve_priv *priv,
        }
 }
 
+/* Process all device options for a given describe device call. */
+static int
+gve_process_device_options(struct gve_priv *priv,
+                          struct gve_device_descriptor *descriptor,
+                          struct gve_device_option_gqi_rda **dev_op_gqi_rda,
+                          struct gve_device_option_gqi_qpl **dev_op_gqi_qpl,
+                          struct gve_device_option_dqo_rda **dev_op_dqo_rda)
+{
+       const int num_options = be16_to_cpu(descriptor->num_device_options);
+       struct gve_device_option *dev_opt;
+       int i;
+
+       /* The options struct directly follows the device descriptor. */
+       dev_opt = (void *)(descriptor + 1);
+       for (i = 0; i < num_options; i++) {
+               struct gve_device_option *next_opt;
+
+               next_opt = gve_get_next_option(descriptor, dev_opt);
+               if (!next_opt) {
+                       dev_err(&priv->dev->dev,
+                               "options exceed device_descriptor's total length.\n");
+                       return -EINVAL;
+               }
+
+               gve_parse_device_option(priv, descriptor, dev_opt,
+                                       dev_op_gqi_rda, dev_op_gqi_qpl,
+                                       dev_op_dqo_rda);
+               dev_opt = next_opt;
+       }
+
+       return 0;
+}
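A sketch of how a caller might combine this with the queue formats defined in gve.h; the precedence order shown is an assumption for illustration, not taken from this patch:

    /* sketch: parse device options, then pick a queue format */
    static void choose_queue_format(struct gve_priv *priv,
                                    struct gve_device_descriptor *descriptor)
    {
            struct gve_device_option_gqi_rda *dev_op_gqi_rda = NULL;
            struct gve_device_option_gqi_qpl *dev_op_gqi_qpl = NULL;
            struct gve_device_option_dqo_rda *dev_op_dqo_rda = NULL;

            if (gve_process_device_options(priv, descriptor, &dev_op_gqi_rda,
                                           &dev_op_gqi_qpl, &dev_op_dqo_rda))
                    return;

            if (dev_op_dqo_rda)
                    priv->queue_format = GVE_DQO_RDA_FORMAT;
            else if (dev_op_gqi_rda)
                    priv->queue_format = GVE_GQI_RDA_FORMAT;
            else if (dev_op_gqi_qpl)
                    priv->queue_format = GVE_GQI_QPL_FORMAT;
    }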
+
 int gve_adminq_alloc(struct device *dev, struct gve_priv *priv)
 {
        priv->adminq = dma_alloc_coherent(dev, PAGE_SIZE,
@@ -88,6 +176,7 @@ int gve_adminq_alloc(struct device *dev, struct gve_priv *priv)
        priv->adminq_set_driver_parameter_cnt = 0;
        priv->adminq_report_stats_cnt = 0;
        priv->adminq_report_link_speed_cnt = 0;
+       priv->adminq_get_ptype_map_cnt = 0;
 
        /* Setup Admin queue with the device */
        iowrite32be(priv->adminq_bus_addr / PAGE_SIZE,
@@ -293,6 +382,9 @@ static int gve_adminq_issue_cmd(struct gve_priv *priv,
        case GVE_ADMINQ_REPORT_LINK_SPEED:
                priv->adminq_report_link_speed_cnt++;
                break;
+       case GVE_ADMINQ_GET_PTYPE_MAP:
+               priv->adminq_get_ptype_map_cnt++;
+               break;
        default:
                dev_err(&priv->pdev->dev, "unknown AQ command opcode %d\n", opcode);
        }
@@ -305,7 +397,8 @@ static int gve_adminq_issue_cmd(struct gve_priv *priv,
  * The caller is also responsible for making sure there are no commands
  * waiting to be executed.
  */
-static int gve_adminq_execute_cmd(struct gve_priv *priv, union gve_adminq_command *cmd_orig)
+static int gve_adminq_execute_cmd(struct gve_priv *priv,
+                                 union gve_adminq_command *cmd_orig)
 {
        u32 tail, head;
        int err;
@@ -350,6 +443,7 @@ int gve_adminq_configure_device_resources(struct gve_priv *priv,
                .irq_db_stride = cpu_to_be32(sizeof(priv->ntfy_blocks[0])),
                .ntfy_blk_msix_base_idx =
                                        cpu_to_be32(GVE_NTFY_BLK_BASE_MSIX_IDX),
+               .queue_format = priv->queue_format,
        };
 
        return gve_adminq_execute_cmd(priv, &cmd);
@@ -369,27 +463,32 @@ static int gve_adminq_create_tx_queue(struct gve_priv *priv, u32 queue_index)
 {
        struct gve_tx_ring *tx = &priv->tx[queue_index];
        union gve_adminq_command cmd;
-       u32 qpl_id;
-       int err;
 
-       qpl_id = priv->raw_addressing ? GVE_RAW_ADDRESSING_QPL_ID : tx->tx_fifo.qpl->id;
        memset(&cmd, 0, sizeof(cmd));
        cmd.opcode = cpu_to_be32(GVE_ADMINQ_CREATE_TX_QUEUE);
        cmd.create_tx_queue = (struct gve_adminq_create_tx_queue) {
                .queue_id = cpu_to_be32(queue_index),
-               .reserved = 0,
                .queue_resources_addr =
                        cpu_to_be64(tx->q_resources_bus),
                .tx_ring_addr = cpu_to_be64(tx->bus),
-               .queue_page_list_id = cpu_to_be32(qpl_id),
                .ntfy_id = cpu_to_be32(tx->ntfy_id),
        };
 
-       err = gve_adminq_issue_cmd(priv, &cmd);
-       if (err)
-               return err;
+       if (gve_is_gqi(priv)) {
+               u32 qpl_id = priv->queue_format == GVE_GQI_RDA_FORMAT ?
+                       GVE_RAW_ADDRESSING_QPL_ID : tx->tx_fifo.qpl->id;
+
+               cmd.create_tx_queue.queue_page_list_id = cpu_to_be32(qpl_id);
+       } else {
+               cmd.create_tx_queue.tx_ring_size =
+                       cpu_to_be16(priv->tx_desc_cnt);
+               cmd.create_tx_queue.tx_comp_ring_addr =
+                       cpu_to_be64(tx->complq_bus_dqo);
+               cmd.create_tx_queue.tx_comp_ring_size =
+                       cpu_to_be16(priv->options_dqo_rda.tx_comp_ring_entries);
+       }
 
-       return 0;
+       return gve_adminq_issue_cmd(priv, &cmd);
 }
 
 int gve_adminq_create_tx_queues(struct gve_priv *priv, u32 num_queues)
@@ -410,28 +509,41 @@ static int gve_adminq_create_rx_queue(struct gve_priv *priv, u32 queue_index)
 {
        struct gve_rx_ring *rx = &priv->rx[queue_index];
        union gve_adminq_command cmd;
-       u32 qpl_id;
-       int err;
 
-       qpl_id = priv->raw_addressing ? GVE_RAW_ADDRESSING_QPL_ID : rx->data.qpl->id;
        memset(&cmd, 0, sizeof(cmd));
        cmd.opcode = cpu_to_be32(GVE_ADMINQ_CREATE_RX_QUEUE);
        cmd.create_rx_queue = (struct gve_adminq_create_rx_queue) {
                .queue_id = cpu_to_be32(queue_index),
-               .index = cpu_to_be32(queue_index),
-               .reserved = 0,
                .ntfy_id = cpu_to_be32(rx->ntfy_id),
                .queue_resources_addr = cpu_to_be64(rx->q_resources_bus),
-               .rx_desc_ring_addr = cpu_to_be64(rx->desc.bus),
-               .rx_data_ring_addr = cpu_to_be64(rx->data.data_bus),
-               .queue_page_list_id = cpu_to_be32(qpl_id),
        };
 
-       err = gve_adminq_issue_cmd(priv, &cmd);
-       if (err)
-               return err;
+       if (gve_is_gqi(priv)) {
+               u32 qpl_id = priv->queue_format == GVE_GQI_RDA_FORMAT ?
+                       GVE_RAW_ADDRESSING_QPL_ID : rx->data.qpl->id;
+
+               cmd.create_rx_queue.rx_desc_ring_addr =
+                       cpu_to_be64(rx->desc.bus);
+               cmd.create_rx_queue.rx_data_ring_addr =
+                       cpu_to_be64(rx->data.data_bus);
+               cmd.create_rx_queue.index = cpu_to_be32(queue_index);
+               cmd.create_rx_queue.queue_page_list_id = cpu_to_be32(qpl_id);
+       } else {
+               cmd.create_rx_queue.rx_ring_size =
+                       cpu_to_be16(priv->rx_desc_cnt);
+               cmd.create_rx_queue.rx_desc_ring_addr =
+                       cpu_to_be64(rx->dqo.complq.bus);
+               cmd.create_rx_queue.rx_data_ring_addr =
+                       cpu_to_be64(rx->dqo.bufq.bus);
+               cmd.create_rx_queue.packet_buffer_size =
+                       cpu_to_be16(priv->data_buffer_size_dqo);
+               cmd.create_rx_queue.rx_buff_ring_size =
+                       cpu_to_be16(priv->options_dqo_rda.rx_buff_ring_entries);
+               cmd.create_rx_queue.enable_rsc =
+                       !!(priv->dev->features & NETIF_F_LRO);
+       }
 
-       return 0;
+       return gve_adminq_issue_cmd(priv, &cmd);
 }
 
 int gve_adminq_create_rx_queues(struct gve_priv *priv, u32 num_queues)
@@ -512,17 +624,51 @@ int gve_adminq_destroy_rx_queues(struct gve_priv *priv, u32 num_queues)
        return gve_adminq_kick_and_wait(priv);
 }
 
+static int gve_set_desc_cnt(struct gve_priv *priv,
+                           struct gve_device_descriptor *descriptor)
+{
+       priv->tx_desc_cnt = be16_to_cpu(descriptor->tx_queue_entries);
+       if (priv->tx_desc_cnt * sizeof(priv->tx->desc[0]) < PAGE_SIZE) {
+               dev_err(&priv->pdev->dev, "Tx desc count %d too low\n",
+                       priv->tx_desc_cnt);
+               return -EINVAL;
+       }
+       priv->rx_desc_cnt = be16_to_cpu(descriptor->rx_queue_entries);
+       if (priv->rx_desc_cnt * sizeof(priv->rx->desc.desc_ring[0])
+           < PAGE_SIZE) {
+               dev_err(&priv->pdev->dev, "Rx desc count %d too low\n",
+                       priv->rx_desc_cnt);
+               return -EINVAL;
+       }
+       return 0;
+}
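
For scale: with 4 KiB pages and 16-byte descriptors (sizes assumed for illustration, not restated in this hunk), the checks above reject any ring shorter than 4096 / 16 = 256 entries, since each descriptor ring must span at least one full page.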
+
+static int
+gve_set_desc_cnt_dqo(struct gve_priv *priv,
+                    const struct gve_device_descriptor *descriptor,
+                    const struct gve_device_option_dqo_rda *dev_op_dqo_rda)
+{
+       priv->tx_desc_cnt = be16_to_cpu(descriptor->tx_queue_entries);
+       priv->options_dqo_rda.tx_comp_ring_entries =
+               be16_to_cpu(dev_op_dqo_rda->tx_comp_ring_entries);
+       priv->rx_desc_cnt = be16_to_cpu(descriptor->rx_queue_entries);
+       priv->options_dqo_rda.rx_buff_ring_entries =
+               be16_to_cpu(dev_op_dqo_rda->rx_buff_ring_entries);
+
+       return 0;
+}
+
 int gve_adminq_describe_device(struct gve_priv *priv)
 {
+       struct gve_device_option_gqi_rda *dev_op_gqi_rda = NULL;
+       struct gve_device_option_gqi_qpl *dev_op_gqi_qpl = NULL;
+       struct gve_device_option_dqo_rda *dev_op_dqo_rda = NULL;
        struct gve_device_descriptor *descriptor;
-       struct gve_device_option *dev_opt;
        union gve_adminq_command cmd;
        dma_addr_t descriptor_bus;
-       u16 num_options;
        int err = 0;
        u8 *mac;
        u16 mtu;
-       int i;
 
        memset(&cmd, 0, sizeof(cmd));
        descriptor = dma_alloc_coherent(&priv->pdev->dev, PAGE_SIZE,
@@ -540,21 +686,41 @@ int gve_adminq_describe_device(struct gve_priv *priv)
        if (err)
                goto free_device_descriptor;
 
-       priv->tx_desc_cnt = be16_to_cpu(descriptor->tx_queue_entries);
-       if (priv->tx_desc_cnt * sizeof(priv->tx->desc[0]) < PAGE_SIZE) {
-               dev_err(&priv->pdev->dev, "Tx desc count %d too low\n", priv->tx_desc_cnt);
-               err = -EINVAL;
+       err = gve_process_device_options(priv, descriptor, &dev_op_gqi_rda,
+                                        &dev_op_gqi_qpl, &dev_op_dqo_rda);
+       if (err)
                goto free_device_descriptor;
+
+       /* If the GQI_RAW_ADDRESSING option already selected the GqiRda queue
+        * format, keep it. Otherwise choose the queue format in priority
+        * order: DqoRda, then GqiRda, with GqiQpl as the default.
+        */
+       if (priv->queue_format == GVE_GQI_RDA_FORMAT) {
+               dev_info(&priv->pdev->dev,
+                        "Driver is running with GQI RDA queue format.\n");
+       } else if (dev_op_dqo_rda) {
+               priv->queue_format = GVE_DQO_RDA_FORMAT;
+               dev_info(&priv->pdev->dev,
+                        "Driver is running with DQO RDA queue format.\n");
+       } else if (dev_op_gqi_rda) {
+               priv->queue_format = GVE_GQI_RDA_FORMAT;
+               dev_info(&priv->pdev->dev,
+                        "Driver is running with GQI RDA queue format.\n");
+       } else {
+               priv->queue_format = GVE_GQI_QPL_FORMAT;
+               dev_info(&priv->pdev->dev,
+                        "Driver is running with GQI QPL queue format.\n");
        }
-       priv->rx_desc_cnt = be16_to_cpu(descriptor->rx_queue_entries);
-       if (priv->rx_desc_cnt * sizeof(priv->rx->desc.desc_ring[0])
-           < PAGE_SIZE ||
-           priv->rx_desc_cnt * sizeof(priv->rx->data.data_ring[0])
-           < PAGE_SIZE) {
-               dev_err(&priv->pdev->dev, "Rx desc count %d too low\n", priv->rx_desc_cnt);
-               err = -EINVAL;
-               goto free_device_descriptor;
+       if (gve_is_gqi(priv)) {
+               err = gve_set_desc_cnt(priv, descriptor);
+       } else {
+               /* DQO supports LRO. */
+               priv->dev->hw_features |= NETIF_F_LRO;
+               err = gve_set_desc_cnt_dqo(priv, descriptor, dev_op_dqo_rda);
        }
+       if (err)
+               goto free_device_descriptor;
+
        priv->max_registered_pages =
                                be64_to_cpu(descriptor->max_registered_pages);
        mtu = be16_to_cpu(descriptor->mtu);
@@ -570,32 +736,16 @@ int gve_adminq_describe_device(struct gve_priv *priv)
        dev_info(&priv->pdev->dev, "MAC addr: %pM\n", mac);
        priv->tx_pages_per_qpl = be16_to_cpu(descriptor->tx_pages_per_qpl);
        priv->rx_data_slot_cnt = be16_to_cpu(descriptor->rx_pages_per_qpl);
-       if (priv->rx_data_slot_cnt < priv->rx_desc_cnt) {
+
+       if (gve_is_gqi(priv) && priv->rx_data_slot_cnt < priv->rx_desc_cnt) {
                dev_err(&priv->pdev->dev, "rx_data_slot_cnt cannot be smaller than rx_desc_cnt, setting rx_desc_cnt down to %d.\n",
                        priv->rx_data_slot_cnt);
                priv->rx_desc_cnt = priv->rx_data_slot_cnt;
        }
        priv->default_num_queues = be16_to_cpu(descriptor->default_num_queues);
-       dev_opt = (void *)(descriptor + 1);
-
-       num_options = be16_to_cpu(descriptor->num_device_options);
-       for (i = 0; i < num_options; i++) {
-               struct gve_device_option *next_opt;
-
-               next_opt = gve_get_next_option(descriptor, dev_opt);
-               if (!next_opt) {
-                       dev_err(&priv->dev->dev,
-                               "options exceed device_descriptor's total length.\n");
-                       err = -EINVAL;
-                       goto free_device_descriptor;
-               }
-
-               gve_parse_device_option(priv, descriptor, dev_opt);
-               dev_opt = next_opt;
-       }
 
 free_device_descriptor:
-       dma_free_coherent(&priv->pdev->dev, sizeof(*descriptor), descriptor,
+       dma_free_coherent(&priv->pdev->dev, PAGE_SIZE, descriptor,
                          descriptor_bus);
        return err;
 }
@@ -701,3 +851,41 @@ int gve_adminq_report_link_speed(struct gve_priv *priv)
                          link_speed_region_bus);
        return err;
 }
+
+int gve_adminq_get_ptype_map_dqo(struct gve_priv *priv,
+                                struct gve_ptype_lut *ptype_lut)
+{
+       struct gve_ptype_map *ptype_map;
+       union gve_adminq_command cmd;
+       dma_addr_t ptype_map_bus;
+       int err = 0;
+       int i;
+
+       memset(&cmd, 0, sizeof(cmd));
+       ptype_map = dma_alloc_coherent(&priv->pdev->dev, sizeof(*ptype_map),
+                                      &ptype_map_bus, GFP_KERNEL);
+       if (!ptype_map)
+               return -ENOMEM;
+
+       cmd.opcode = cpu_to_be32(GVE_ADMINQ_GET_PTYPE_MAP);
+       cmd.get_ptype_map = (struct gve_adminq_get_ptype_map) {
+               .ptype_map_len = cpu_to_be64(sizeof(*ptype_map)),
+               .ptype_map_addr = cpu_to_be64(ptype_map_bus),
+       };
+
+       err = gve_adminq_execute_cmd(priv, &cmd);
+       if (err)
+               goto err;
+
+       /* Populate ptype_lut. */
+       for (i = 0; i < GVE_NUM_PTYPES; i++) {
+               ptype_lut->ptypes[i].l3_type =
+                       ptype_map->ptypes[i].l3_type;
+               ptype_lut->ptypes[i].l4_type =
+                       ptype_map->ptypes[i].l4_type;
+       }
+err:
+       dma_free_coherent(&priv->pdev->dev, sizeof(*ptype_map), ptype_map,
+                         ptype_map_bus);
+       return err;
+}
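
The LUT populated here is what the DQO RX path consults to interpret the 10-bit packet_type field of each RX completion descriptor. A minimal sketch of such a lookup, assuming the LUT entries carry the same l3_type/l4_type pair copied above (the helper name is hypothetical, not part of this patch):

        /* Hypothetical consumer of the ptype LUT; not the driver's code. */
        static bool gve_ptype_is_tcp_v4(const struct gve_ptype_lut *lut,
                                        u16 packet_type)
        {
                /* packet_type is the 10-bit field in gve_rx_compl_desc_dqo. */
                return lut->ptypes[packet_type].l3_type == GVE_L3_TYPE_IPV4 &&
                       lut->ptypes[packet_type].l4_type == GVE_L4_TYPE_TCP;
        }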
index d320c2f..47c3d8f 100644 (file)
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: (GPL-2.0 OR MIT)
  * Google virtual Ethernet (gve) driver
  *
- * Copyright (C) 2015-2019 Google, Inc.
+ * Copyright (C) 2015-2021 Google, Inc.
  */
 
 #ifndef _GVE_ADMINQ_H
@@ -22,7 +22,8 @@ enum gve_adminq_opcodes {
        GVE_ADMINQ_DECONFIGURE_DEVICE_RESOURCES = 0x9,
        GVE_ADMINQ_SET_DRIVER_PARAMETER         = 0xB,
        GVE_ADMINQ_REPORT_STATS                 = 0xC,
-       GVE_ADMINQ_REPORT_LINK_SPEED    = 0xD
+       GVE_ADMINQ_REPORT_LINK_SPEED            = 0xD,
+       GVE_ADMINQ_GET_PTYPE_MAP                = 0xE,
 };
 
 /* Admin queue status codes */
@@ -82,14 +83,54 @@ static_assert(sizeof(struct gve_device_descriptor) == 40);
 struct gve_device_option {
        __be16 option_id;
        __be16 option_length;
-       __be32 feat_mask;
+       __be32 required_features_mask;
 };
 
 static_assert(sizeof(struct gve_device_option) == 8);
 
-#define GVE_DEV_OPT_ID_RAW_ADDRESSING 0x1
-#define GVE_DEV_OPT_LEN_RAW_ADDRESSING 0x0
-#define GVE_DEV_OPT_FEAT_MASK_RAW_ADDRESSING 0x0
+struct gve_device_option_gqi_rda {
+       __be32 supported_features_mask;
+};
+
+static_assert(sizeof(struct gve_device_option_gqi_rda) == 4);
+
+struct gve_device_option_gqi_qpl {
+       __be32 supported_features_mask;
+};
+
+static_assert(sizeof(struct gve_device_option_gqi_qpl) == 4);
+
+struct gve_device_option_dqo_rda {
+       __be32 supported_features_mask;
+       __be16 tx_comp_ring_entries;
+       __be16 rx_buff_ring_entries;
+};
+
+static_assert(sizeof(struct gve_device_option_dqo_rda) == 8);
+
+/* Terminology:
+ *
+ * RDA - Raw DMA Addressing - Buffers associated with SKBs are directly DMA
+ *       mapped and read/updated by the device.
+ *
+ * QPL - Queue Page Lists - The driver uses bounce buffers that are DMA mapped
+ *       with the device for read/write, and data is copied from/to SKBs.
+ */
+enum gve_dev_opt_id {
+       GVE_DEV_OPT_ID_GQI_RAW_ADDRESSING = 0x1,
+       GVE_DEV_OPT_ID_GQI_RDA = 0x2,
+       GVE_DEV_OPT_ID_GQI_QPL = 0x3,
+       GVE_DEV_OPT_ID_DQO_RDA = 0x4,
+};
+
+enum gve_dev_opt_req_feat_mask {
+       GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RAW_ADDRESSING = 0x0,
+       GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RDA = 0x0,
+       GVE_DEV_OPT_REQ_FEAT_MASK_GQI_QPL = 0x0,
+       GVE_DEV_OPT_REQ_FEAT_MASK_DQO_RDA = 0x0,
+};
+
+#define GVE_DEV_OPT_LEN_GQI_RAW_ADDRESSING 0x0
 
 struct gve_adminq_configure_device_resources {
        __be64 counter_array;
@@ -98,9 +139,11 @@ struct gve_adminq_configure_device_resources {
        __be32 num_irq_dbs;
        __be32 irq_db_stride;
        __be32 ntfy_blk_msix_base_idx;
+       u8 queue_format;
+       u8 padding[7];
 };
 
-static_assert(sizeof(struct gve_adminq_configure_device_resources) == 32);
+static_assert(sizeof(struct gve_adminq_configure_device_resources) == 40);
 
 struct gve_adminq_register_page_list {
        __be32 page_list_id;
@@ -125,9 +168,13 @@ struct gve_adminq_create_tx_queue {
        __be64 tx_ring_addr;
        __be32 queue_page_list_id;
        __be32 ntfy_id;
+       __be64 tx_comp_ring_addr;
+       __be16 tx_ring_size;
+       __be16 tx_comp_ring_size;
+       u8 padding[4];
 };
 
-static_assert(sizeof(struct gve_adminq_create_tx_queue) == 32);
+static_assert(sizeof(struct gve_adminq_create_tx_queue) == 48);
 
 struct gve_adminq_create_rx_queue {
        __be32 queue_id;
@@ -138,10 +185,14 @@ struct gve_adminq_create_rx_queue {
        __be64 rx_desc_ring_addr;
        __be64 rx_data_ring_addr;
        __be32 queue_page_list_id;
-       u8 padding[4];
+       __be16 rx_ring_size;
+       __be16 packet_buffer_size;
+       __be16 rx_buff_ring_size;
+       u8 enable_rsc;
+       u8 padding[5];
 };
 
-static_assert(sizeof(struct gve_adminq_create_rx_queue) == 48);
+static_assert(sizeof(struct gve_adminq_create_rx_queue) == 56);
 
 /* Queue resources that are shared with the device */
 struct gve_queue_resources {
@@ -226,6 +277,41 @@ enum gve_stat_names {
        RX_DROPS_INVALID_CHECKSUM       = 68,
 };
 
+enum gve_l3_type {
+       /* Must be zero so a zero-initialized LUT maps to unknown. */
+       GVE_L3_TYPE_UNKNOWN = 0,
+       GVE_L3_TYPE_OTHER,
+       GVE_L3_TYPE_IPV4,
+       GVE_L3_TYPE_IPV6,
+};
+
+enum gve_l4_type {
+       /* Must be zero so a zero-initialized LUT maps to unknown. */
+       GVE_L4_TYPE_UNKNOWN = 0,
+       GVE_L4_TYPE_OTHER,
+       GVE_L4_TYPE_TCP,
+       GVE_L4_TYPE_UDP,
+       GVE_L4_TYPE_ICMP,
+       GVE_L4_TYPE_SCTP,
+};
+
+/* These are control path types for PTYPE, identical to the data path
+ * types.
+ */
+struct gve_ptype_entry {
+       u8 l3_type;
+       u8 l4_type;
+};
+
+struct gve_ptype_map {
+       struct gve_ptype_entry ptypes[1 << 10]; /* PTYPES are always 10 bits. */
+};
+
+struct gve_adminq_get_ptype_map {
+       __be64 ptype_map_len;
+       __be64 ptype_map_addr;
+};
+
 union gve_adminq_command {
        struct {
                __be32 opcode;
@@ -243,6 +329,7 @@ union gve_adminq_command {
                        struct gve_adminq_set_driver_parameter set_driver_param;
                        struct gve_adminq_report_stats report_stats;
                        struct gve_adminq_report_link_speed report_link_speed;
+                       struct gve_adminq_get_ptype_map get_ptype_map;
                };
        };
        u8 reserved[64];
@@ -271,4 +358,9 @@ int gve_adminq_set_mtu(struct gve_priv *priv, u64 mtu);
 int gve_adminq_report_stats(struct gve_priv *priv, u64 stats_report_len,
                            dma_addr_t stats_report_addr, u64 interval);
 int gve_adminq_report_link_speed(struct gve_priv *priv);
+
+struct gve_ptype_lut;
+int gve_adminq_get_ptype_map_dqo(struct gve_priv *priv,
+                                struct gve_ptype_lut *ptype_lut);
+
 #endif /* _GVE_ADMINQ_H */
diff --git a/drivers/net/ethernet/google/gve/gve_desc_dqo.h b/drivers/net/ethernet/google/gve/gve_desc_dqo.h
new file mode 100644 (file)
index 0000000..e8fe9ad
--- /dev/null
@@ -0,0 +1,256 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT)
+ * Google virtual Ethernet (gve) driver
+ *
+ * Copyright (C) 2015-2021 Google, Inc.
+ */
+
+/* GVE DQO Descriptor formats */
+
+#ifndef _GVE_DESC_DQO_H_
+#define _GVE_DESC_DQO_H_
+
+#include <linux/build_bug.h>
+
+#define GVE_TX_MAX_HDR_SIZE_DQO 255
+#define GVE_TX_MIN_TSO_MSS_DQO 88
+
+#ifndef __LITTLE_ENDIAN_BITFIELD
+#error "Only little endian supported"
+#endif
+
+/* Basic TX descriptor (DTYPE 0x0C) */
+struct gve_tx_pkt_desc_dqo {
+       __le64 buf_addr;
+
+       /* Must be GVE_TX_PKT_DESC_DTYPE_DQO (0xc) */
+       u8 dtype: 5;
+
+       /* Denotes the last descriptor of a packet. */
+       u8 end_of_packet: 1;
+       u8 checksum_offload_enable: 1;
+
+       /* If set, will generate a descriptor completion for this descriptor. */
+       u8 report_event: 1;
+       u8 reserved0;
+       __le16 reserved1;
+
+       /* The TX completion associated with this packet will contain this tag.
+        */
+       __le16 compl_tag;
+       u16 buf_size: 14;
+       u16 reserved2: 2;
+} __packed;
+static_assert(sizeof(struct gve_tx_pkt_desc_dqo) == 16);
+
+#define GVE_TX_PKT_DESC_DTYPE_DQO 0xc
+#define GVE_TX_MAX_BUF_SIZE_DQO ((16 * 1024) - 1)
+
+/* Maximum number of data descriptors allowed per packet, or per-TSO segment. */
+#define GVE_TX_MAX_DATA_DESCS 10
+
+/* Min gap between tail and head to avoid cacheline overlap */
+#define GVE_TX_MIN_DESC_PREVENT_CACHE_OVERLAP 4
+
+/* "report_event" on TX packet descriptors may only be reported on the last
+ * descriptor of a TX packet, and they must be spaced apart with at least this
+ * value.
+ */
+#define GVE_TX_MIN_RE_INTERVAL 32
+
+struct gve_tx_context_cmd_dtype {
+       u8 dtype: 5;
+       u8 tso: 1;
+       u8 reserved1: 2;
+
+       u8 reserved2;
+};
+
+static_assert(sizeof(struct gve_tx_context_cmd_dtype) == 2);
+
+/* TX Native TSO Context DTYPE (0x05)
+ *
+ * "flex" fields allow the driver to send additional packet context to HW.
+ */
+struct gve_tx_tso_context_desc_dqo {
+       /* The L4 payload bytes that should be segmented. */
+       u32 tso_total_len: 24;
+       u32 flex10: 8;
+
+       /* Max segment size in TSO excluding headers. */
+       u16 mss: 14;
+       u16 reserved: 2;
+
+       u8 header_len; /* Header length to use for TSO offload */
+       u8 flex11;
+       struct gve_tx_context_cmd_dtype cmd_dtype;
+       u8 flex0;
+       u8 flex5;
+       u8 flex6;
+       u8 flex7;
+       u8 flex8;
+       u8 flex9;
+} __packed;
+static_assert(sizeof(struct gve_tx_tso_context_desc_dqo) == 16);
+
+#define GVE_TX_TSO_CTX_DESC_DTYPE_DQO 0x5
+
+/* General context descriptor for sending metadata. */
+struct gve_tx_general_context_desc_dqo {
+       u8 flex4;
+       u8 flex5;
+       u8 flex6;
+       u8 flex7;
+       u8 flex8;
+       u8 flex9;
+       u8 flex10;
+       u8 flex11;
+       struct gve_tx_context_cmd_dtype cmd_dtype;
+       u16 reserved;
+       u8 flex0;
+       u8 flex1;
+       u8 flex2;
+       u8 flex3;
+} __packed;
+static_assert(sizeof(struct gve_tx_general_context_desc_dqo) == 16);
+
+#define GVE_TX_GENERAL_CTX_DESC_DTYPE_DQO 0x4
+
+/* Logical structure of metadata which is packed into context descriptor flex
+ * fields.
+ */
+struct gve_tx_metadata_dqo {
+       union {
+               struct {
+                       u8 version;
+
+                       /* If `skb->l4_hash` is set, this value should be
+                        * derived from `skb->hash`.
+                        *
+                        * A zero value means no l4_hash was associated with the
+                        * skb.
+                        */
+                       u16 path_hash: 15;
+
+                       /* Should be set to 1 if the flow associated with the
+                        * skb had a rehash from the TCP stack.
+                        */
+                       u16 rehash_event: 1;
+               } __packed;
+               u8 bytes[12];
+       };
+} __packed;
+static_assert(sizeof(struct gve_tx_metadata_dqo) == 12);
+
+#define GVE_TX_METADATA_VERSION_DQO 0
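
To make the packing concrete, a sketch of filling this metadata from an skb per the field comments above (the helper is illustrative, not this patch's code, and the rehash handling is elided):

        /* Illustrative only: pack TX metadata as the comments describe. */
        static void gve_fill_tx_metadata_sketch(struct gve_tx_metadata_dqo *metadata,
                                                const struct sk_buff *skb)
        {
                metadata->version = GVE_TX_METADATA_VERSION_DQO;
                if (skb->l4_hash) {
                        metadata->path_hash = skb->hash & 0x7fff; /* 15-bit field */
                        /* metadata->rehash_event would be set on a TCP rehash. */
                }
        }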
+
+/* TX completion descriptor */
+struct gve_tx_compl_desc {
+       /* For types 0-4 this is the TX queue ID associated with this
+        * completion.
+        */
+       u16 id: 11;
+
+       /* See: GVE_COMPL_TYPE_DQO* */
+       u16 type: 3;
+       u16 reserved0: 1;
+
+       /* Flipped by HW to indicate the descriptor is populated. */
+       u16 generation: 1;
+       union {
+               /* For descriptor completions, this is the last index fetched
+                * by HW + 1.
+                */
+               __le16 tx_head;
+
+               /* For packet completions, this is the completion tag set on the
+                * TX packet descriptors.
+                */
+               __le16 completion_tag;
+       };
+       __le32 reserved1;
+} __packed;
+static_assert(sizeof(struct gve_tx_compl_desc) == 8);
+
+#define GVE_COMPL_TYPE_DQO_PKT 0x2 /* Packet completion */
+#define GVE_COMPL_TYPE_DQO_DESC 0x4 /* Descriptor completion */
+#define GVE_COMPL_TYPE_DQO_MISS 0x1 /* Miss path completion */
+#define GVE_COMPL_TYPE_DQO_REINJECTION 0x3 /* Re-injection completion */
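
The generation bit is how software detects new completions without a produced-count register: the device flips the bit on each pass over the ring, so the consumer compares it against the generation it expects. A simplified polling sketch, with the index bookkeeping assumed, process_pkt_completion() a stand-in, and memory ordering (dma_rmb()) elided:

        /* Sketch of a completion poll; not the driver's actual loop. */
        static void poll_tx_completions_sketch(struct gve_tx_compl_desc *ring,
                                               u32 ring_size, u32 *idx,
                                               u8 *expected_gen)
        {
                struct gve_tx_compl_desc *desc = &ring[*idx];

                while (desc->generation == *expected_gen) {
                        if (desc->type == GVE_COMPL_TYPE_DQO_PKT)
                                process_pkt_completion(le16_to_cpu(desc->completion_tag));
                        if (++(*idx) == ring_size) { /* wrap flips the bit */
                                *idx = 0;
                                *expected_gen ^= 1;
                        }
                        desc = &ring[*idx];
                }
        }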
+
+/* Descriptor to post buffers to HW on buffer queue. */
+struct gve_rx_desc_dqo {
+       __le16 buf_id; /* ID returned in Rx completion descriptor */
+       __le16 reserved0;
+       __le32 reserved1;
+       __le64 buf_addr; /* DMA address of the buffer */
+       __le64 header_buf_addr;
+       __le64 reserved2;
+} __packed;
+static_assert(sizeof(struct gve_rx_desc_dqo) == 32);
+
+/* Descriptor for HW to notify SW of new packets received on RX queue. */
+struct gve_rx_compl_desc_dqo {
+       /* Must be 1 */
+       u8 rxdid: 4;
+       u8 reserved0: 4;
+
+       /* Packet originated from this system rather than the network. */
+       u8 loopback: 1;
+       /* Set when an IPv6 packet contains a destination options header or
+        * routing header.
+        */
+       u8 ipv6_ex_add: 1;
+       /* Invalid packet was received. */
+       u8 rx_error: 1;
+       u8 reserved1: 5;
+
+       u16 packet_type: 10;
+       u16 ip_hdr_err: 1;
+       u16 udp_len_err: 1;
+       u16 raw_cs_invalid: 1;
+       u16 reserved2: 3;
+
+       u16 packet_len: 14;
+       /* Flipped by HW to indicate the descriptor is populated. */
+       u16 generation: 1;
+       /* Should be zero. */
+       u16 buffer_queue_id: 1;
+
+       u16 header_len: 10;
+       u16 rsc: 1;
+       u16 split_header: 1;
+       u16 reserved3: 4;
+
+       u8 descriptor_done: 1;
+       u8 end_of_packet: 1;
+       u8 header_buffer_overflow: 1;
+       u8 l3_l4_processed: 1;
+       u8 csum_ip_err: 1;
+       u8 csum_l4_err: 1;
+       u8 csum_external_ip_err: 1;
+       u8 csum_external_udp_err: 1;
+
+       u8 status_error1;
+
+       __le16 reserved5;
+       __le16 buf_id; /* Buffer ID which was sent on the buffer queue. */
+
+       union {
+               /* Packet checksum. */
+               __le16 raw_cs;
+               /* Segment length for RSC packets. */
+               __le16 rsc_seg_len;
+       };
+       __le32 hash;
+       __le32 reserved6;
+       __le64 reserved7;
+} __packed;
+
+static_assert(sizeof(struct gve_rx_compl_desc_dqo) == 32);
+
+/* Ringing the doorbell too often can hurt performance.
+ *
+ * HW requires this value to be at least 8.
+ */
+#define GVE_RX_BUF_THRESH_DQO 32
+
+#endif /* _GVE_DESC_DQO_H_ */
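
GVE_RX_BUF_THRESH_DQO exists so buffer posting batches doorbell writes instead of ringing the NIC once per buffer. A minimal sketch of the pattern, with the pending-count bookkeeping assumed rather than taken from this patch:

        /* Hypothetical wrapper: only kick the NIC once a batch is ready. */
        static void gve_rx_maybe_kick_dqo(struct gve_priv *priv, int queue_idx,
                                          u32 bufs_pending)
        {
                if (bufs_pending >= GVE_RX_BUF_THRESH_DQO)
                        gve_rx_write_doorbell_dqo(priv, queue_idx);
        }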
diff --git a/drivers/net/ethernet/google/gve/gve_dqo.h b/drivers/net/ethernet/google/gve/gve_dqo.h
new file mode 100644 (file)
index 0000000..8360423
--- /dev/null
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT)
+ * Google virtual Ethernet (gve) driver
+ *
+ * Copyright (C) 2015-2021 Google, Inc.
+ */
+
+#ifndef _GVE_DQO_H_
+#define _GVE_DQO_H_
+
+#include "gve_adminq.h"
+
+#define GVE_ITR_ENABLE_BIT_DQO BIT(0)
+#define GVE_ITR_CLEAR_PBA_BIT_DQO BIT(1)
+#define GVE_ITR_NO_UPDATE_DQO (3 << 3)
+
+#define GVE_ITR_INTERVAL_DQO_SHIFT 5
+#define GVE_ITR_INTERVAL_DQO_MASK ((1 << 12) - 1)
+
+#define GVE_TX_IRQ_RATELIMIT_US_DQO 50
+#define GVE_RX_IRQ_RATELIMIT_US_DQO 20
+
+/* Timeout in seconds to wait for a reinjection completion after receiving
+ * its corresponding miss completion.
+ */
+#define GVE_REINJECT_COMPL_TIMEOUT 1
+
+/* Timeout in seconds to deallocate the completion tag for a packet that was
+ * prematurely freed for not receiving a valid completion. This should be large
+ * enough to rule out the possibility of receiving the corresponding valid
+ * completion after this interval.
+ */
+#define GVE_DEALLOCATE_COMPL_TIMEOUT 60
+
+netdev_tx_t gve_tx_dqo(struct sk_buff *skb, struct net_device *dev);
+bool gve_tx_poll_dqo(struct gve_notify_block *block, bool do_clean);
+int gve_rx_poll_dqo(struct gve_notify_block *block, int budget);
+int gve_tx_alloc_rings_dqo(struct gve_priv *priv);
+void gve_tx_free_rings_dqo(struct gve_priv *priv);
+int gve_rx_alloc_rings_dqo(struct gve_priv *priv);
+void gve_rx_free_rings_dqo(struct gve_priv *priv);
+int gve_clean_tx_done_dqo(struct gve_priv *priv, struct gve_tx_ring *tx,
+                         struct napi_struct *napi);
+void gve_rx_post_buffers_dqo(struct gve_rx_ring *rx);
+void gve_rx_write_doorbell_dqo(const struct gve_priv *priv, int queue_idx);
+
+static inline void
+gve_tx_put_doorbell_dqo(const struct gve_priv *priv,
+                       const struct gve_queue_resources *q_resources, u32 val)
+{
+       u64 index;
+
+       index = be32_to_cpu(q_resources->db_index);
+       iowrite32(val, &priv->db_bar2[index]);
+}
+
+/* Builds the register value to write to the DQO IRQ doorbell to re-enable the
+ * interrupt with the specified ratelimit.
+ */
+static inline u32 gve_set_itr_ratelimit_dqo(u32 ratelimit_us)
+{
+       u32 result = GVE_ITR_ENABLE_BIT_DQO;
+
+       /* Interval has 2us granularity. */
+       ratelimit_us >>= 1;
+
+       ratelimit_us &= GVE_ITR_INTERVAL_DQO_MASK;
+       result |= (ratelimit_us << GVE_ITR_INTERVAL_DQO_SHIFT);
+
+       return result;
+}
+
+static inline void
+gve_write_irq_doorbell_dqo(const struct gve_priv *priv,
+                          const struct gve_notify_block *block, u32 val)
+{
+       u32 index = be32_to_cpu(block->irq_db_index);
+
+       iowrite32(val, &priv->db_bar2[index]);
+}
+
+#endif /* _GVE_DQO_H_ */
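
To make the encoding concrete: passing GVE_TX_IRQ_RATELIMIT_US_DQO (50 us) through gve_set_itr_ratelimit_dqo() shifts it down to 25 ticks of the 2 us granularity, masks it into the 12-bit interval field, and ORs in the enable bit, yielding GVE_ITR_ENABLE_BIT_DQO | (25 << GVE_ITR_INTERVAL_DQO_SHIFT).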
index 5fb05cf..716e624 100644 (file)
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: (GPL-2.0 OR MIT)
 /* Google virtual Ethernet (gve) driver
  *
- * Copyright (C) 2015-2019 Google, Inc.
+ * Copyright (C) 2015-2021 Google, Inc.
  */
 
 #include <linux/ethtool.h>
@@ -311,8 +311,16 @@ gve_get_ethtool_stats(struct net_device *netdev,
                for (ring = 0; ring < priv->tx_cfg.num_queues; ring++) {
                        struct gve_tx_ring *tx = &priv->tx[ring];
 
-                       data[i++] = tx->req;
-                       data[i++] = tx->done;
+                       if (gve_is_gqi(priv)) {
+                               data[i++] = tx->req;
+                               data[i++] = tx->done;
+                       } else {
+                               /* DQO doesn't currently support
+                                * posted/completed descriptor counts.
+                                */
+                               data[i++] = 0;
+                               data[i++] = 0;
+                       }
                        do {
                                start =
                                  u64_stats_fetch_begin(&priv->tx[ring].statss);
@@ -453,11 +461,16 @@ static int gve_set_tunable(struct net_device *netdev,
 
        switch (etuna->id) {
        case ETHTOOL_RX_COPYBREAK:
+       {
+               u32 max_copybreak = gve_is_gqi(priv) ?
+                       (PAGE_SIZE / 2) : priv->data_buffer_size_dqo;
+
                len = *(u32 *)value;
-               if (len > PAGE_SIZE / 2)
+               if (len > max_copybreak)
                        return -EINVAL;
                priv->rx_copybreak = len;
                return 0;
+       }
        default:
                return -EOPNOTSUPP;
        }
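
From userspace this maps to the standard ethtool tunable, e.g. "ethtool --set-tunable eth0 rx-copybreak 128" to copy RX packets of up to 128 bytes instead of handing pages up the stack; on DQO the accepted ceiling follows the configured data buffer size rather than half a page.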
index bbc423e..ac4819c 100644 (file)
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: (GPL-2.0 OR MIT)
 /* Google virtual Ethernet (gve) driver
  *
- * Copyright (C) 2015-2019 Google, Inc.
+ * Copyright (C) 2015-2021 Google, Inc.
  */
 
 #include <linux/cpumask.h>
@@ -14,6 +14,7 @@
 #include <linux/workqueue.h>
 #include <net/sch_generic.h>
 #include "gve.h"
+#include "gve_dqo.h"
 #include "gve_adminq.h"
 #include "gve_register.h"
 
 const char gve_version_str[] = GVE_VERSION;
 static const char gve_version_prefix[] = GVE_VERSION_PREFIX;
 
+static netdev_tx_t gve_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+       struct gve_priv *priv = netdev_priv(dev);
+
+       if (gve_is_gqi(priv))
+               return gve_tx(skb, dev);
+       else
+               return gve_tx_dqo(skb, dev);
+}
+
 static void gve_get_stats(struct net_device *dev, struct rtnl_link_stats64 *s)
 {
        struct gve_priv *priv = netdev_priv(dev);
@@ -155,6 +166,15 @@ static irqreturn_t gve_intr(int irq, void *arg)
        return IRQ_HANDLED;
 }
 
+static irqreturn_t gve_intr_dqo(int irq, void *arg)
+{
+       struct gve_notify_block *block = arg;
+
+       /* Interrupts are automatically masked */
+       napi_schedule_irqoff(&block->napi);
+       return IRQ_HANDLED;
+}
+
 static int gve_napi_poll(struct napi_struct *napi, int budget)
 {
        struct gve_notify_block *block;
@@ -191,6 +211,54 @@ static int gve_napi_poll(struct napi_struct *napi, int budget)
        return 0;
 }
 
+static int gve_napi_poll_dqo(struct napi_struct *napi, int budget)
+{
+       struct gve_notify_block *block =
+               container_of(napi, struct gve_notify_block, napi);
+       struct gve_priv *priv = block->priv;
+       bool reschedule = false;
+       int work_done = 0;
+
+       /* Clear PCI MSI-X Pending Bit Array (PBA)
+        *
+        * This bit is set if an interrupt event occurs while the vector is
+        * masked. If this bit is set and we reenable the interrupt, it will
+        * fire again. Since we're just about to poll the queue state, we don't
+        * need it to fire again.
+        *
+        * Under high softirq load, it's possible that the interrupt condition
+        * is triggered twice before we get the chance to process it.
+        */
+       gve_write_irq_doorbell_dqo(priv, block,
+                                  GVE_ITR_NO_UPDATE_DQO | GVE_ITR_CLEAR_PBA_BIT_DQO);
+
+       if (block->tx)
+               reschedule |= gve_tx_poll_dqo(block, /*do_clean=*/true);
+
+       if (block->rx) {
+               work_done = gve_rx_poll_dqo(block, budget);
+               reschedule |= work_done == budget;
+       }
+
+       if (reschedule)
+               return budget;
+
+       if (likely(napi_complete_done(napi, work_done))) {
+               /* Enable interrupts again.
+                *
+                * We don't need to repoll afterwards because HW supports the
+                * PCI MSI-X PBA feature.
+                *
+                * Another interrupt would be triggered if a new event came in
+                * since the last one.
+                */
+               gve_write_irq_doorbell_dqo(priv, block,
+                                          GVE_ITR_NO_UPDATE_DQO | GVE_ITR_ENABLE_BIT_DQO);
+       }
+
+       return work_done;
+}
+
 static int gve_alloc_notify_blocks(struct gve_priv *priv)
 {
        int num_vecs_requested = priv->num_ntfy_blks + 1;
@@ -264,7 +332,8 @@ static int gve_alloc_notify_blocks(struct gve_priv *priv)
                         name, i);
                block->priv = priv;
                err = request_irq(priv->msix_vectors[msix_idx].vector,
-                                 gve_intr, 0, block->name, block);
+                                 gve_is_gqi(priv) ? gve_intr : gve_intr_dqo,
+                                 0, block->name, block);
                if (err) {
                        dev_err(&priv->pdev->dev,
                                "Failed to receive msix vector %d\n", i);
@@ -346,6 +415,22 @@ static int gve_setup_device_resources(struct gve_priv *priv)
                err = -ENXIO;
                goto abort_with_stats_report;
        }
+
+       if (priv->queue_format == GVE_DQO_RDA_FORMAT) {
+               priv->ptype_lut_dqo = kvzalloc(sizeof(*priv->ptype_lut_dqo),
+                                              GFP_KERNEL);
+               if (!priv->ptype_lut_dqo) {
+                       err = -ENOMEM;
+                       goto abort_with_stats_report;
+               }
+               err = gve_adminq_get_ptype_map_dqo(priv, priv->ptype_lut_dqo);
+               if (err) {
+                       dev_err(&priv->pdev->dev,
+                               "Failed to get ptype map: err=%d\n", err);
+                       goto abort_with_ptype_lut;
+               }
+       }
+
        err = gve_adminq_report_stats(priv, priv->stats_report_len,
                                      priv->stats_report_bus,
                                      GVE_STATS_REPORT_TIMER_PERIOD);
@@ -354,12 +439,17 @@ static int gve_setup_device_resources(struct gve_priv *priv)
                        "Failed to report stats: err=%d\n", err);
        gve_set_device_resources_ok(priv);
        return 0;
+
+abort_with_ptype_lut:
+       kvfree(priv->ptype_lut_dqo);
+       priv->ptype_lut_dqo = NULL;
 abort_with_stats_report:
        gve_free_stats_report(priv);
 abort_with_ntfy_blocks:
        gve_free_notify_blocks(priv);
 abort_with_counter:
        gve_free_counter_array(priv);
+
        return err;
 }
 
@@ -386,17 +476,22 @@ static void gve_teardown_device_resources(struct gve_priv *priv)
                        gve_trigger_reset(priv);
                }
        }
+
+       kvfree(priv->ptype_lut_dqo);
+       priv->ptype_lut_dqo = NULL;
+
        gve_free_counter_array(priv);
        gve_free_notify_blocks(priv);
        gve_free_stats_report(priv);
        gve_clear_device_resources_ok(priv);
 }
 
-static void gve_add_napi(struct gve_priv *priv, int ntfy_idx)
+static void gve_add_napi(struct gve_priv *priv, int ntfy_idx,
+                        int (*gve_poll)(struct napi_struct *, int))
 {
        struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
 
-       netif_napi_add(priv->dev, &block->napi, gve_napi_poll,
+       netif_napi_add(priv->dev, &block->napi, gve_poll,
                       NAPI_POLL_WEIGHT);
 }
 
@@ -476,31 +571,75 @@ static int gve_create_rings(struct gve_priv *priv)
        netif_dbg(priv, drv, priv->dev, "created %d rx queues\n",
                  priv->rx_cfg.num_queues);
 
-       /* Rx data ring has been prefilled with packet buffers at queue
-        * allocation time.
-        * Write the doorbell to provide descriptor slots and packet buffers
-        * to the NIC.
-        */
-       for (i = 0; i < priv->rx_cfg.num_queues; i++)
-               gve_rx_write_doorbell(priv, &priv->rx[i]);
+       if (gve_is_gqi(priv)) {
+               /* Rx data ring has been prefilled with packet buffers at queue
+                * allocation time.
+                *
+                * Write the doorbell to provide descriptor slots and packet
+                * buffers to the NIC.
+                */
+               for (i = 0; i < priv->rx_cfg.num_queues; i++)
+                       gve_rx_write_doorbell(priv, &priv->rx[i]);
+       } else {
+               for (i = 0; i < priv->rx_cfg.num_queues; i++) {
+                       /* Post buffers and ring doorbell. */
+                       gve_rx_post_buffers_dqo(&priv->rx[i]);
+               }
+       }
 
        return 0;
 }
 
+static void add_napi_init_sync_stats(struct gve_priv *priv,
+                                    int (*napi_poll)(struct napi_struct *napi,
+                                                     int budget))
+{
+       int i;
+
+       /* Add tx napi & init sync stats */
+       for (i = 0; i < priv->tx_cfg.num_queues; i++) {
+               int ntfy_idx = gve_tx_idx_to_ntfy(priv, i);
+
+               u64_stats_init(&priv->tx[i].statss);
+               priv->tx[i].ntfy_id = ntfy_idx;
+               gve_add_napi(priv, ntfy_idx, napi_poll);
+       }
+       /* Add rx napi & init sync stats */
+       for (i = 0; i < priv->rx_cfg.num_queues; i++) {
+               int ntfy_idx = gve_rx_idx_to_ntfy(priv, i);
+
+               u64_stats_init(&priv->rx[i].statss);
+               priv->rx[i].ntfy_id = ntfy_idx;
+               gve_add_napi(priv, ntfy_idx, napi_poll);
+       }
+}
+
+static void gve_tx_free_rings(struct gve_priv *priv)
+{
+       if (gve_is_gqi(priv))
+               gve_tx_free_rings_gqi(priv);
+       else
+               gve_tx_free_rings_dqo(priv);
+}
+
 static int gve_alloc_rings(struct gve_priv *priv)
 {
-       int ntfy_idx;
        int err;
-       int i;
 
        /* Setup tx rings */
        priv->tx = kvzalloc(priv->tx_cfg.num_queues * sizeof(*priv->tx),
                            GFP_KERNEL);
        if (!priv->tx)
                return -ENOMEM;
-       err = gve_tx_alloc_rings(priv);
+
+       if (gve_is_gqi(priv))
+               err = gve_tx_alloc_rings(priv);
+       else
+               err = gve_tx_alloc_rings_dqo(priv);
        if (err)
                goto free_tx;
+
        /* Setup rx rings */
        priv->rx = kvzalloc(priv->rx_cfg.num_queues * sizeof(*priv->rx),
                            GFP_KERNEL);
@@ -508,21 +647,18 @@ static int gve_alloc_rings(struct gve_priv *priv)
                err = -ENOMEM;
                goto free_tx_queue;
        }
-       err = gve_rx_alloc_rings(priv);
+
+       if (gve_is_gqi(priv))
+               err = gve_rx_alloc_rings(priv);
+       else
+               err = gve_rx_alloc_rings_dqo(priv);
        if (err)
                goto free_rx;
-       /* Add tx napi & init sync stats*/
-       for (i = 0; i < priv->tx_cfg.num_queues; i++) {
-               u64_stats_init(&priv->tx[i].statss);
-               ntfy_idx = gve_tx_idx_to_ntfy(priv, i);
-               gve_add_napi(priv, ntfy_idx);
-       }
-       /* Add rx napi  & init sync stats*/
-       for (i = 0; i < priv->rx_cfg.num_queues; i++) {
-               u64_stats_init(&priv->rx[i].statss);
-               ntfy_idx = gve_rx_idx_to_ntfy(priv, i);
-               gve_add_napi(priv, ntfy_idx);
-       }
+
+       if (gve_is_gqi(priv))
+               add_napi_init_sync_stats(priv, gve_napi_poll);
+       else
+               add_napi_init_sync_stats(priv, gve_napi_poll_dqo);
 
        return 0;
 
@@ -560,6 +696,14 @@ static int gve_destroy_rings(struct gve_priv *priv)
        return 0;
 }
 
+static void gve_rx_free_rings(struct gve_priv *priv)
+{
+       if (gve_is_gqi(priv))
+               gve_rx_free_rings_gqi(priv);
+       else
+               gve_rx_free_rings_dqo(priv);
+}
+
 static void gve_free_rings(struct gve_priv *priv)
 {
        int ntfy_idx;
@@ -681,7 +825,7 @@ static int gve_alloc_qpls(struct gve_priv *priv)
        int err;
 
        /* Raw addressing means no QPLs */
-       if (priv->raw_addressing)
+       if (priv->queue_format == GVE_GQI_RDA_FORMAT)
                return 0;
 
        priv->qpls = kvzalloc(num_qpls * sizeof(*priv->qpls), GFP_KERNEL);
@@ -725,7 +869,7 @@ static void gve_free_qpls(struct gve_priv *priv)
        int i;
 
        /* Raw addressing means no QPLs */
-       if (priv->raw_addressing)
+       if (priv->queue_format == GVE_GQI_RDA_FORMAT)
                return;
 
        kvfree(priv->qpl_cfg.qpl_id_map);
@@ -759,6 +903,7 @@ static int gve_open(struct net_device *dev)
        err = gve_alloc_qpls(priv);
        if (err)
                return err;
+
        err = gve_alloc_rings(priv);
        if (err)
                goto free_qpls;
@@ -773,9 +918,17 @@ static int gve_open(struct net_device *dev)
        err = gve_register_qpls(priv);
        if (err)
                goto reset;
+
+       if (!gve_is_gqi(priv)) {
+               /* Hard-code this for now; it may be tuned for performance in
+                * the future.
+                */
+               priv->data_buffer_size_dqo = GVE_RX_BUFFER_SIZE_DQO;
+       }
        err = gve_create_rings(priv);
        if (err)
                goto reset;
+
        gve_set_device_rings_ok(priv);
 
        if (gve_get_report_stats(priv))
@@ -924,14 +1077,26 @@ static void gve_turnup(struct gve_priv *priv)
                struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
 
                napi_enable(&block->napi);
-               iowrite32be(0, gve_irq_doorbell(priv, block));
+               if (gve_is_gqi(priv)) {
+                       iowrite32be(0, gve_irq_doorbell(priv, block));
+               } else {
+                       u32 val = gve_set_itr_ratelimit_dqo(GVE_TX_IRQ_RATELIMIT_US_DQO);
+
+                       gve_write_irq_doorbell_dqo(priv, block, val);
+               }
        }
        for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
                int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
                struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
 
                napi_enable(&block->napi);
-               iowrite32be(0, gve_irq_doorbell(priv, block));
+               if (gve_is_gqi(priv)) {
+                       iowrite32be(0, gve_irq_doorbell(priv, block));
+               } else {
+                       u32 val = gve_set_itr_ratelimit_dqo(GVE_RX_IRQ_RATELIMIT_US_DQO);
+
+                       gve_write_irq_doorbell_dqo(priv, block, val);
+               }
        }
 
        gve_set_napi_enabled(priv);
@@ -945,12 +1110,49 @@ static void gve_tx_timeout(struct net_device *dev, unsigned int txqueue)
        priv->tx_timeo_cnt++;
 }
 
+static int gve_set_features(struct net_device *netdev,
+                           netdev_features_t features)
+{
+       const netdev_features_t orig_features = netdev->features;
+       struct gve_priv *priv = netdev_priv(netdev);
+       int err;
+
+       if ((netdev->features & NETIF_F_LRO) != (features & NETIF_F_LRO)) {
+               netdev->features ^= NETIF_F_LRO;
+               if (netif_carrier_ok(netdev)) {
+                       /* To make this process as simple as possible we
+                        * tear down the device, set the new configuration,
+                        * and then bring the device up again.
+                        */
+                       err = gve_close(netdev);
+                       /* We have already tried to reset in close, just fail
+                        * at this point.
+                        */
+                       if (err)
+                               goto err;
+
+                       err = gve_open(netdev);
+                       if (err)
+                               goto err;
+               }
+       }
+
+       return 0;
+err:
+       /* Reverts the change on error. */
+       netdev->features = orig_features;
+       netif_err(priv, drv, netdev,
+                 "Set features failed! !!! DISABLING ALL QUEUES !!!\n");
+       return err;
+}
+
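
Since toggling LRO on DQO requires the full close/open cycle above, the userspace-visible knob is just the standard offload flag, e.g. "ethtool -K eth0 lro on"; if the reopen fails, the queues are left disabled, as the error message warns.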
 static const struct net_device_ops gve_netdev_ops = {
-       .ndo_start_xmit         =       gve_tx,
+       .ndo_start_xmit         =       gve_start_xmit,
        .ndo_open               =       gve_open,
        .ndo_stop               =       gve_close,
        .ndo_get_stats64        =       gve_get_stats,
        .ndo_tx_timeout         =       gve_tx_timeout,
+       .ndo_set_features       =       gve_set_features,
 };
 
 static void gve_handle_status(struct gve_priv *priv, u32 status)
@@ -994,6 +1196,15 @@ void gve_handle_report_stats(struct gve_priv *priv)
        /* tx stats */
        if (priv->tx) {
                for (idx = 0; idx < priv->tx_cfg.num_queues; idx++) {
+                       u32 last_completion = 0;
+                       u32 tx_frames = 0;
+
+                       /* DQO doesn't currently support these metrics. */
+                       if (gve_is_gqi(priv)) {
+                               last_completion = priv->tx[idx].done;
+                               tx_frames = priv->tx[idx].req;
+                       }
+
                        do {
                                start = u64_stats_fetch_begin(&priv->tx[idx].statss);
                                tx_bytes = priv->tx[idx].bytes_done;
@@ -1010,7 +1221,7 @@ void gve_handle_report_stats(struct gve_priv *priv)
                        };
                        stats[stats_idx++] = (struct stats) {
                                .stat_name = cpu_to_be32(TX_FRAMES_SENT),
-                               .value = cpu_to_be64(priv->tx[idx].req),
+                               .value = cpu_to_be64(tx_frames),
                                .queue_id = cpu_to_be32(idx),
                        };
                        stats[stats_idx++] = (struct stats) {
@@ -1020,7 +1231,7 @@ void gve_handle_report_stats(struct gve_priv *priv)
                        };
                        stats[stats_idx++] = (struct stats) {
                                .stat_name = cpu_to_be32(TX_LAST_COMPLETION_PROCESSED),
-                               .value = cpu_to_be64(priv->tx[idx].done),
+                               .value = cpu_to_be64(last_completion),
                                .queue_id = cpu_to_be32(idx),
                        };
                }
@@ -1088,7 +1299,7 @@ static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
        if (skip_describe_device)
                goto setup_device;
 
-       priv->raw_addressing = false;
+       priv->queue_format = GVE_QUEUE_FORMAT_UNSPECIFIED;
        /* Get the initial information we need from the device */
        err = gve_adminq_describe_device(priv);
        if (err) {
@@ -1096,7 +1307,7 @@ static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
                        "Could not get device information: err=%d\n", err);
                goto err;
        }
-       if (priv->dev->max_mtu > PAGE_SIZE) {
+       if (gve_is_gqi(priv) && priv->dev->max_mtu > PAGE_SIZE) {
                priv->dev->max_mtu = PAGE_SIZE;
                err = gve_adminq_set_mtu(priv, priv->dev->mtu);
                if (err) {
@@ -1307,7 +1518,12 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        pci_set_drvdata(pdev, dev);
        dev->ethtool_ops = &gve_ethtool_ops;
        dev->netdev_ops = &gve_netdev_ops;
-       /* advertise features */
+
+       /* Set default and supported features.
+        *
+        * Features might be set in other locations as well (such as
+        * `gve_adminq_describe_device`).
+        */
        dev->hw_features = NETIF_F_HIGHDMA;
        dev->hw_features |= NETIF_F_SG;
        dev->hw_features |= NETIF_F_HW_CSUM;
@@ -1352,6 +1568,7 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                goto abort_with_wq;
 
        dev_info(&pdev->dev, "GVE version %s\n", gve_version_str);
+       dev_info(&pdev->dev, "GVE queue format %d\n", (int)priv->queue_format);
        gve_clear_probe_in_progress(priv);
        queue_work(priv->gve_wq, &priv->service_task);
        return 0;
index bf123fe..bb82613 100644 (file)
@@ -1,21 +1,14 @@
 // SPDX-License-Identifier: (GPL-2.0 OR MIT)
 /* Google virtual Ethernet (gve) driver
  *
- * Copyright (C) 2015-2019 Google, Inc.
+ * Copyright (C) 2015-2021 Google, Inc.
  */
 
 #include "gve.h"
 #include "gve_adminq.h"
+#include "gve_utils.h"
 #include <linux/etherdevice.h>
 
-static void gve_rx_remove_from_block(struct gve_priv *priv, int queue_idx)
-{
-       struct gve_notify_block *block =
-                       &priv->ntfy_blocks[gve_rx_idx_to_ntfy(priv, queue_idx)];
-
-       block->rx = NULL;
-}
-
 static void gve_rx_free_buffer(struct device *dev,
                               struct gve_rx_slot_page_info *page_info,
                               union gve_rx_data_slot *data_slot)
@@ -137,16 +130,6 @@ alloc_err:
        return err;
 }
 
-static void gve_rx_add_to_block(struct gve_priv *priv, int queue_idx)
-{
-       u32 ntfy_idx = gve_rx_idx_to_ntfy(priv, queue_idx);
-       struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
-       struct gve_rx_ring *rx = &priv->rx[queue_idx];
-
-       block->rx = rx;
-       rx->ntfy_id = ntfy_idx;
-}
-
 static int gve_rx_alloc_ring(struct gve_priv *priv, int idx)
 {
        struct gve_rx_ring *rx = &priv->rx[idx];
@@ -165,7 +148,7 @@ static int gve_rx_alloc_ring(struct gve_priv *priv, int idx)
 
        slots = priv->rx_data_slot_cnt;
        rx->mask = slots - 1;
-       rx->data.raw_addressing = priv->raw_addressing;
+       rx->data.raw_addressing = priv->queue_format == GVE_GQI_RDA_FORMAT;
 
        /* alloc rx data ring */
        bytes = sizeof(*rx->data.data_ring) * slots;
@@ -255,7 +238,7 @@ int gve_rx_alloc_rings(struct gve_priv *priv)
        return err;
 }
 
-void gve_rx_free_rings(struct gve_priv *priv)
+void gve_rx_free_rings_gqi(struct gve_priv *priv)
 {
        int i;
 
@@ -279,27 +262,6 @@ static enum pkt_hash_types gve_rss_type(__be16 pkt_flags)
        return PKT_HASH_TYPE_L2;
 }
 
-static struct sk_buff *gve_rx_copy(struct net_device *dev,
-                                  struct napi_struct *napi,
-                                  struct gve_rx_slot_page_info *page_info,
-                                  u16 len)
-{
-       struct sk_buff *skb = napi_alloc_skb(napi, len);
-       void *va = page_info->page_address + GVE_RX_PAD +
-                  (page_info->page_offset ? PAGE_SIZE / 2 : 0);
-
-       if (unlikely(!skb))
-               return NULL;
-
-       __skb_put(skb, len);
-
-       skb_copy_to_linear_data(skb, va, len);
-
-       skb->protocol = eth_type_trans(skb, dev);
-
-       return skb;
-}
-
 static struct sk_buff *gve_rx_add_frags(struct napi_struct *napi,
                                        struct gve_rx_slot_page_info *page_info,
                                        u16 len)
@@ -310,7 +272,7 @@ static struct sk_buff *gve_rx_add_frags(struct napi_struct *napi,
                return NULL;
 
        skb_add_rx_frag(skb, 0, page_info->page,
-                       (page_info->page_offset ? PAGE_SIZE / 2 : 0) +
+                       page_info->page_offset +
                        GVE_RX_PAD, len, PAGE_SIZE / 2);
 
        return skb;
@@ -321,7 +283,7 @@ static void gve_rx_flip_buff(struct gve_rx_slot_page_info *page_info, __be64 *sl
        const __be64 offset = cpu_to_be64(PAGE_SIZE / 2);
 
        /* "flip" to other packet buffer on this page */
-       page_info->page_offset ^= 0x1;
+       page_info->page_offset ^= PAGE_SIZE / 2;
        *(slot_addr) ^= offset;
 }
 
@@ -388,7 +350,7 @@ gve_rx_qpl(struct device *dev, struct net_device *netdev,
                        gve_rx_flip_buff(page_info, &data_slot->qpl_offset);
                }
        } else {
-               skb = gve_rx_copy(netdev, napi, page_info, len);
+               skb = gve_rx_copy(netdev, napi, page_info, len, GVE_RX_PAD);
                if (skb) {
                        u64_stats_update_begin(&rx->statss);
                        rx->rx_copied_pkt++;
@@ -430,7 +392,7 @@ static bool gve_rx(struct gve_rx_ring *rx, struct gve_rx_desc *rx_desc,
 
        if (len <= priv->rx_copybreak) {
                /* Just copy small packets */
-               skb = gve_rx_copy(dev, napi, page_info, len);
+               skb = gve_rx_copy(dev, napi, page_info, len, GVE_RX_PAD);
                u64_stats_update_begin(&rx->statss);
                rx->rx_copied_pkt++;
                rx->rx_copybreak_pkt++;
diff --git a/drivers/net/ethernet/google/gve/gve_rx_dqo.c b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
new file mode 100644 (file)
index 0000000..8738db0
--- /dev/null
@@ -0,0 +1,763 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/* Google virtual Ethernet (gve) driver
+ *
+ * Copyright (C) 2015-2021 Google, Inc.
+ */
+
+#include "gve.h"
+#include "gve_dqo.h"
+#include "gve_adminq.h"
+#include "gve_utils.h"
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <net/ip6_checksum.h>
+#include <net/ipv6.h>
+#include <net/tcp.h>
+
+static int gve_buf_ref_cnt(struct gve_rx_buf_state_dqo *bs)
+{
+       return page_count(bs->page_info.page) - bs->page_info.pagecnt_bias;
+}
+
+static void gve_free_page_dqo(struct gve_priv *priv,
+                             struct gve_rx_buf_state_dqo *bs)
+{
+       page_ref_sub(bs->page_info.page, bs->page_info.pagecnt_bias - 1);
+       gve_free_page(&priv->pdev->dev, bs->page_info.page, bs->addr,
+                     DMA_FROM_DEVICE);
+       bs->page_info.page = NULL;
+}
+
+static struct gve_rx_buf_state_dqo *gve_alloc_buf_state(struct gve_rx_ring *rx)
+{
+       struct gve_rx_buf_state_dqo *buf_state;
+       s16 buffer_id;
+
+       buffer_id = rx->dqo.free_buf_states;
+       if (unlikely(buffer_id == -1))
+               return NULL;
+
+       buf_state = &rx->dqo.buf_states[buffer_id];
+
+       /* Remove buf_state from free list */
+       rx->dqo.free_buf_states = buf_state->next;
+
+       /* Point buf_state to itself to mark it as allocated */
+       buf_state->next = buffer_id;
+
+       return buf_state;
+}
+
+static bool gve_buf_state_is_allocated(struct gve_rx_ring *rx,
+                                      struct gve_rx_buf_state_dqo *buf_state)
+{
+       s16 buffer_id = buf_state - rx->dqo.buf_states;
+
+       return buf_state->next == buffer_id;
+}
+
+static void gve_free_buf_state(struct gve_rx_ring *rx,
+                              struct gve_rx_buf_state_dqo *buf_state)
+{
+       s16 buffer_id = buf_state - rx->dqo.buf_states;
+
+       buf_state->next = rx->dqo.free_buf_states;
+       rx->dqo.free_buf_states = buffer_id;
+}
+
+static struct gve_rx_buf_state_dqo *
+gve_dequeue_buf_state(struct gve_rx_ring *rx, struct gve_index_list *list)
+{
+       struct gve_rx_buf_state_dqo *buf_state;
+       s16 buffer_id;
+
+       buffer_id = list->head;
+       if (unlikely(buffer_id == -1))
+               return NULL;
+
+       buf_state = &rx->dqo.buf_states[buffer_id];
+
+       /* Remove buf_state from list */
+       list->head = buf_state->next;
+       if (buf_state->next == -1)
+               list->tail = -1;
+
+       /* Point buf_state to itself to mark it as allocated */
+       buf_state->next = buffer_id;
+
+       return buf_state;
+}
+
+static void gve_enqueue_buf_state(struct gve_rx_ring *rx,
+                                 struct gve_index_list *list,
+                                 struct gve_rx_buf_state_dqo *buf_state)
+{
+       s16 buffer_id = buf_state - rx->dqo.buf_states;
+
+       buf_state->next = -1;
+
+       if (list->head == -1) {
+               list->head = buffer_id;
+               list->tail = buffer_id;
+       } else {
+               int tail = list->tail;
+
+               rx->dqo.buf_states[tail].next = buffer_id;
+               list->tail = buffer_id;
+       }
+}
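
The buffer-state helpers above never allocate at runtime: every `next` field is an s16 index into the preallocated buf_states array, -1 terminates a list, and an entry whose `next` points at itself is marked as allocated. A minimal userspace sketch of the same index-list scheme (illustrative only, not driver code):

        #include <stdio.h>

        #define N 4

        static short next[N];          /* index links; -1 terminates a list */
        static short free_head;

        static void init(void)
        {
                int i;

                for (i = 0; i < N - 1; i++)
                        next[i] = i + 1;
                next[N - 1] = -1;
                free_head = 0;
        }

        static short alloc_state(void)
        {
                short id = free_head;

                if (id == -1)
                        return -1;
                free_head = next[id];
                next[id] = id;         /* self-link marks "allocated" */
                return id;
        }

        static void free_state(short id)
        {
                next[id] = free_head;
                free_head = id;
        }

        int main(void)
        {
                short a, b;

                init();
                a = alloc_state();
                b = alloc_state();
                printf("a=%d b=%d a_allocated=%d\n", a, b, next[a] == a);
                free_state(a);
                printf("reused=%d\n", alloc_state()); /* LIFO: a again */
                return 0;
        }
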
+
+static struct gve_rx_buf_state_dqo *
+gve_get_recycled_buf_state(struct gve_rx_ring *rx)
+{
+       struct gve_rx_buf_state_dqo *buf_state;
+       int i;
+
+       /* Recycled buf states are immediately usable. */
+       buf_state = gve_dequeue_buf_state(rx, &rx->dqo.recycled_buf_states);
+       if (likely(buf_state))
+               return buf_state;
+
+       if (unlikely(rx->dqo.used_buf_states.head == -1))
+               return NULL;
+
+       /* Used buf states are only usable when ref count reaches 0, which means
+        * no SKBs refer to them.
+        *
+        * Search a limited number of entries before giving up.
+        */
+       for (i = 0; i < 5; i++) {
+               buf_state = gve_dequeue_buf_state(rx, &rx->dqo.used_buf_states);
+               if (gve_buf_ref_cnt(buf_state) == 0)
+                       return buf_state;
+
+               gve_enqueue_buf_state(rx, &rx->dqo.used_buf_states, buf_state);
+       }
+
+       /* If there are no free buf states, discard an entry from
+        * `used_buf_states` so it can be used.
+        */
+       if (unlikely(rx->dqo.free_buf_states == -1)) {
+               buf_state = gve_dequeue_buf_state(rx, &rx->dqo.used_buf_states);
+               if (gve_buf_ref_cnt(buf_state) == 0)
+                       return buf_state;
+
+               gve_free_page_dqo(rx->gve, buf_state);
+               gve_free_buf_state(rx, buf_state);
+       }
+
+       return NULL;
+}
+
+static int gve_alloc_page_dqo(struct gve_priv *priv,
+                             struct gve_rx_buf_state_dqo *buf_state)
+{
+       int err;
+
+       err = gve_alloc_page(priv, &priv->pdev->dev, &buf_state->page_info.page,
+                            &buf_state->addr, DMA_FROM_DEVICE);
+       if (err)
+               return err;
+
+       buf_state->page_info.page_offset = 0;
+       buf_state->page_info.page_address =
+               page_address(buf_state->page_info.page);
+       buf_state->last_single_ref_offset = 0;
+
+       /* The page already has 1 ref. */
+       page_ref_add(buf_state->page_info.page, INT_MAX - 1);
+       buf_state->page_info.pagecnt_bias = INT_MAX;
+
+       return 0;
+}
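
The INT_MAX reference trick above avoids touching the page's atomic refcount on every buffer handout: the driver front-loads INT_MAX references and tracks its own share in pagecnt_bias, so gve_buf_ref_cnt() (page count minus bias) yields the number of references still held by SKBs. A worked sketch of that bookkeeping with plain integers (illustrative; 1 << 30 stands in for INT_MAX):

        #include <assert.h>

        int main(void)
        {
                /* Stand-ins for the page refcount and the driver's bias. */
                int page_count = 1;          /* ref taken by the allocation */
                int pagecnt_bias;

                page_count += (1 << 30) - 1; /* page_ref_add(page, MAX - 1) */
                pagecnt_bias = 1 << 30;      /* driver owns all of them */

                /* Hand one reference to an skb: only the bias is touched. */
                pagecnt_bias--;
                assert(page_count - pagecnt_bias == 1); /* one skb ref out */

                /* The stack frees the skb and drops the page ref itself. */
                page_count--;
                assert(page_count - pagecnt_bias == 0); /* reusable again */
                return 0;
        }
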
+
+static void gve_rx_free_ring_dqo(struct gve_priv *priv, int idx)
+{
+       struct gve_rx_ring *rx = &priv->rx[idx];
+       struct device *hdev = &priv->pdev->dev;
+       size_t completion_queue_slots;
+       size_t buffer_queue_slots;
+       size_t size;
+       int i;
+
+       completion_queue_slots = rx->dqo.complq.mask + 1;
+       buffer_queue_slots = rx->dqo.bufq.mask + 1;
+
+       gve_rx_remove_from_block(priv, idx);
+
+       if (rx->q_resources) {
+               dma_free_coherent(hdev, sizeof(*rx->q_resources),
+                                 rx->q_resources, rx->q_resources_bus);
+               rx->q_resources = NULL;
+       }
+
+       for (i = 0; i < rx->dqo.num_buf_states; i++) {
+               struct gve_rx_buf_state_dqo *bs = &rx->dqo.buf_states[i];
+
+               if (bs->page_info.page)
+                       gve_free_page_dqo(priv, bs);
+       }
+
+       if (rx->dqo.bufq.desc_ring) {
+               size = sizeof(rx->dqo.bufq.desc_ring[0]) * buffer_queue_slots;
+               dma_free_coherent(hdev, size, rx->dqo.bufq.desc_ring,
+                                 rx->dqo.bufq.bus);
+               rx->dqo.bufq.desc_ring = NULL;
+       }
+
+       if (rx->dqo.complq.desc_ring) {
+               size = sizeof(rx->dqo.complq.desc_ring[0]) *
+                       completion_queue_slots;
+               dma_free_coherent(hdev, size, rx->dqo.complq.desc_ring,
+                                 rx->dqo.complq.bus);
+               rx->dqo.complq.desc_ring = NULL;
+       }
+
+       kvfree(rx->dqo.buf_states);
+       rx->dqo.buf_states = NULL;
+
+       netif_dbg(priv, drv, priv->dev, "freed rx ring %d\n", idx);
+}
+
+static int gve_rx_alloc_ring_dqo(struct gve_priv *priv, int idx)
+{
+       struct gve_rx_ring *rx = &priv->rx[idx];
+       struct device *hdev = &priv->pdev->dev;
+       size_t size;
+       int i;
+
+       const u32 buffer_queue_slots =
+               priv->options_dqo_rda.rx_buff_ring_entries;
+       const u32 completion_queue_slots = priv->rx_desc_cnt;
+
+       netif_dbg(priv, drv, priv->dev, "allocating rx ring DQO\n");
+
+       memset(rx, 0, sizeof(*rx));
+       rx->gve = priv;
+       rx->q_num = idx;
+       rx->dqo.bufq.mask = buffer_queue_slots - 1;
+       rx->dqo.complq.num_free_slots = completion_queue_slots;
+       rx->dqo.complq.mask = completion_queue_slots - 1;
+       rx->skb_head = NULL;
+       rx->skb_tail = NULL;
+
+       rx->dqo.num_buf_states = min_t(s16, S16_MAX, buffer_queue_slots * 4);
+       rx->dqo.buf_states = kvcalloc(rx->dqo.num_buf_states,
+                                     sizeof(rx->dqo.buf_states[0]),
+                                     GFP_KERNEL);
+       if (!rx->dqo.buf_states)
+               return -ENOMEM;
+
+       /* Set up linked list of buffer IDs */
+       for (i = 0; i < rx->dqo.num_buf_states - 1; i++)
+               rx->dqo.buf_states[i].next = i + 1;
+
+       rx->dqo.buf_states[rx->dqo.num_buf_states - 1].next = -1;
+       rx->dqo.recycled_buf_states.head = -1;
+       rx->dqo.recycled_buf_states.tail = -1;
+       rx->dqo.used_buf_states.head = -1;
+       rx->dqo.used_buf_states.tail = -1;
+
+       /* Allocate RX completion queue */
+       size = sizeof(rx->dqo.complq.desc_ring[0]) *
+               completion_queue_slots;
+       rx->dqo.complq.desc_ring =
+               dma_alloc_coherent(hdev, size, &rx->dqo.complq.bus, GFP_KERNEL);
+       if (!rx->dqo.complq.desc_ring)
+               goto err;
+
+       /* Allocate RX buffer queue */
+       size = sizeof(rx->dqo.bufq.desc_ring[0]) * buffer_queue_slots;
+       rx->dqo.bufq.desc_ring =
+               dma_alloc_coherent(hdev, size, &rx->dqo.bufq.bus, GFP_KERNEL);
+       if (!rx->dqo.bufq.desc_ring)
+               goto err;
+
+       rx->q_resources = dma_alloc_coherent(hdev, sizeof(*rx->q_resources),
+                                            &rx->q_resources_bus, GFP_KERNEL);
+       if (!rx->q_resources)
+               goto err;
+
+       gve_rx_add_to_block(priv, idx);
+
+       return 0;
+
+err:
+       gve_rx_free_ring_dqo(priv, idx);
+       return -ENOMEM;
+}
+
+void gve_rx_write_doorbell_dqo(const struct gve_priv *priv, int queue_idx)
+{
+       const struct gve_rx_ring *rx = &priv->rx[queue_idx];
+       u64 index = be32_to_cpu(rx->q_resources->db_index);
+
+       iowrite32(rx->dqo.bufq.tail, &priv->db_bar2[index]);
+}
+
+int gve_rx_alloc_rings_dqo(struct gve_priv *priv)
+{
+       int err = 0;
+       int i;
+
+       for (i = 0; i < priv->rx_cfg.num_queues; i++) {
+               err = gve_rx_alloc_ring_dqo(priv, i);
+               if (err) {
+                       netif_err(priv, drv, priv->dev,
+                                 "Failed to alloc rx ring=%d: err=%d\n",
+                                 i, err);
+                       goto err;
+               }
+       }
+
+       return 0;
+
+err:
+       for (i--; i >= 0; i--)
+               gve_rx_free_ring_dqo(priv, i);
+
+       return err;
+}
+
+void gve_rx_free_rings_dqo(struct gve_priv *priv)
+{
+       int i;
+
+       for (i = 0; i < priv->rx_cfg.num_queues; i++)
+               gve_rx_free_ring_dqo(priv, i);
+}
+
+void gve_rx_post_buffers_dqo(struct gve_rx_ring *rx)
+{
+       struct gve_rx_compl_queue_dqo *complq = &rx->dqo.complq;
+       struct gve_rx_buf_queue_dqo *bufq = &rx->dqo.bufq;
+       struct gve_priv *priv = rx->gve;
+       u32 num_avail_slots;
+       u32 num_full_slots;
+       u32 num_posted = 0;
+
+       num_full_slots = (bufq->tail - bufq->head) & bufq->mask;
+       num_avail_slots = bufq->mask - num_full_slots;
+
+       num_avail_slots = min_t(u32, num_avail_slots, complq->num_free_slots);
+       while (num_posted < num_avail_slots) {
+               struct gve_rx_desc_dqo *desc = &bufq->desc_ring[bufq->tail];
+               struct gve_rx_buf_state_dqo *buf_state;
+
+               buf_state = gve_get_recycled_buf_state(rx);
+               if (unlikely(!buf_state)) {
+                       buf_state = gve_alloc_buf_state(rx);
+                       if (unlikely(!buf_state))
+                               break;
+
+                       if (unlikely(gve_alloc_page_dqo(priv, buf_state))) {
+                               u64_stats_update_begin(&rx->statss);
+                               rx->rx_buf_alloc_fail++;
+                               u64_stats_update_end(&rx->statss);
+                               gve_free_buf_state(rx, buf_state);
+                               break;
+                       }
+               }
+
+               desc->buf_id = cpu_to_le16(buf_state - rx->dqo.buf_states);
+               desc->buf_addr = cpu_to_le64(buf_state->addr +
+                                            buf_state->page_info.page_offset);
+
+               bufq->tail = (bufq->tail + 1) & bufq->mask;
+               complq->num_free_slots--;
+               num_posted++;
+
+               if ((bufq->tail & (GVE_RX_BUF_THRESH_DQO - 1)) == 0)
+                       gve_rx_write_doorbell_dqo(priv, rx->q_num);
+       }
+
+       rx->fill_cnt += num_posted;
+}
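
Because both queue sizes are powers of two, the fill logic above reduces ring occupancy and wrap-around to mask arithmetic, and the doorbell is only rung once every GVE_RX_BUF_THRESH_DQO posted buffers. A small sketch of that arithmetic (ring size and threshold values are illustrative, not the driver's constants):

        #include <assert.h>

        #define SLOTS  256u                /* power-of-two ring */
        #define MASK   (SLOTS - 1)
        #define THRESH 64u                 /* hypothetical doorbell batch */

        int main(void)
        {
                unsigned int head = 250, tail = 4;  /* tail wrapped around */
                unsigned int num_full  = (tail - head) & MASK;
                unsigned int num_avail = MASK - num_full; /* one kept empty */

                assert(num_full == 10 && num_avail == 245);

                /* Doorbell fires only when tail crosses a THRESH boundary. */
                assert((64u & (THRESH - 1)) == 0);
                assert((65u & (THRESH - 1)) != 0);
                return 0;
        }
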
+
+static void gve_try_recycle_buf(struct gve_priv *priv, struct gve_rx_ring *rx,
+                               struct gve_rx_buf_state_dqo *buf_state)
+{
+       const int data_buffer_size = priv->data_buffer_size_dqo;
+       int pagecount;
+
+       /* Can't reuse if we only fit one buffer per page */
+       if (data_buffer_size * 2 > PAGE_SIZE)
+               goto mark_used;
+
+       pagecount = gve_buf_ref_cnt(buf_state);
+
+       /* Record the offset when we have a single remaining reference.
+        *
+        * When this happens, we know all of the other offsets of the page are
+        * usable.
+        */
+       if (pagecount == 1) {
+               buf_state->last_single_ref_offset =
+                       buf_state->page_info.page_offset;
+       }
+
+       /* Use the next buffer-sized chunk in the page. */
+       buf_state->page_info.page_offset += data_buffer_size;
+       buf_state->page_info.page_offset &= (PAGE_SIZE - 1);
+
+       /* If we wrap around to the same offset without ever dropping to 1
+        * reference, then we don't know if this offset was ever freed.
+        */
+       if (buf_state->page_info.page_offset ==
+           buf_state->last_single_ref_offset) {
+               goto mark_used;
+       }
+
+       gve_enqueue_buf_state(rx, &rx->dqo.recycled_buf_states, buf_state);
+       return;
+
+mark_used:
+       gve_enqueue_buf_state(rx, &rx->dqo.used_buf_states, buf_state);
+}
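
For the recycling above to be legal, at least two buffers must fit in a page; the offset then advances one buffer-sized chunk at a time and wraps with a mask, and an offset can only be trusted again once the refcount has dropped to a single reference since it was last handed out. A quick sketch of the offset walk, assuming 4K pages and 2K buffers:

        #include <stdio.h>

        #define PAGE_SZ 4096u
        #define BUF_SZ  2048u  /* two buffers per page: recycling allowed */

        int main(void)
        {
                unsigned int offset = 0;
                int i;

                for (i = 0; i < 4; i++) {
                        offset = (offset + BUF_SZ) & (PAGE_SZ - 1);
                        printf("next offset: %u\n", offset); /* 2048,0,... */
                }
                return 0;
        }
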
+
+static void gve_rx_skb_csum(struct sk_buff *skb,
+                           const struct gve_rx_compl_desc_dqo *desc,
+                           struct gve_ptype ptype)
+{
+       skb->ip_summed = CHECKSUM_NONE;
+
+       /* HW did not identify and process L3 and L4 headers. */
+       if (unlikely(!desc->l3_l4_processed))
+               return;
+
+       if (ptype.l3_type == GVE_L3_TYPE_IPV4) {
+               if (unlikely(desc->csum_ip_err || desc->csum_external_ip_err))
+                       return;
+       } else if (ptype.l3_type == GVE_L3_TYPE_IPV6) {
+               /* Checksum should be skipped if this flag is set. */
+               if (unlikely(desc->ipv6_ex_add))
+                       return;
+       }
+
+       if (unlikely(desc->csum_l4_err))
+               return;
+
+       switch (ptype.l4_type) {
+       case GVE_L4_TYPE_TCP:
+       case GVE_L4_TYPE_UDP:
+       case GVE_L4_TYPE_ICMP:
+       case GVE_L4_TYPE_SCTP:
+               skb->ip_summed = CHECKSUM_UNNECESSARY;
+               break;
+       default:
+               break;
+       }
+}
+
+static void gve_rx_skb_hash(struct sk_buff *skb,
+                           const struct gve_rx_compl_desc_dqo *compl_desc,
+                           struct gve_ptype ptype)
+{
+       enum pkt_hash_types hash_type = PKT_HASH_TYPE_L2;
+
+       if (ptype.l4_type != GVE_L4_TYPE_UNKNOWN)
+               hash_type = PKT_HASH_TYPE_L4;
+       else if (ptype.l3_type != GVE_L3_TYPE_UNKNOWN)
+               hash_type = PKT_HASH_TYPE_L3;
+
+       skb_set_hash(skb, le32_to_cpu(compl_desc->hash), hash_type);
+}
+
+static void gve_rx_free_skb(struct gve_rx_ring *rx)
+{
+       if (!rx->skb_head)
+               return;
+
+       dev_kfree_skb_any(rx->skb_head);
+       rx->skb_head = NULL;
+       rx->skb_tail = NULL;
+}
+
+/* Chains multiple skbs for a single rx packet.
+ * Returns 0 if the buffer is appended, -1 otherwise.
+ */
+static int gve_rx_append_frags(struct napi_struct *napi,
+                              struct gve_rx_buf_state_dqo *buf_state,
+                              u16 buf_len, struct gve_rx_ring *rx,
+                              struct gve_priv *priv)
+{
+       int num_frags = skb_shinfo(rx->skb_tail)->nr_frags;
+
+       if (unlikely(num_frags == MAX_SKB_FRAGS)) {
+               struct sk_buff *skb;
+
+               skb = napi_alloc_skb(napi, 0);
+               if (!skb)
+                       return -1;
+
+               skb_shinfo(rx->skb_tail)->frag_list = skb;
+               rx->skb_tail = skb;
+               num_frags = 0;
+       }
+       if (rx->skb_tail != rx->skb_head) {
+               rx->skb_head->len += buf_len;
+               rx->skb_head->data_len += buf_len;
+               rx->skb_head->truesize += priv->data_buffer_size_dqo;
+       }
+
+       skb_add_rx_frag(rx->skb_tail, num_frags,
+                       buf_state->page_info.page,
+                       buf_state->page_info.page_offset,
+                       buf_len, priv->data_buffer_size_dqo);
+       gve_dec_pagecnt_bias(&buf_state->page_info);
+
+       return 0;
+}
+
+/* Returns 0 if descriptor is completed successfully.
+ * Returns -EINVAL if descriptor is invalid.
+ * Returns -ENOMEM if data cannot be copied to skb.
+ */
+static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
+                     const struct gve_rx_compl_desc_dqo *compl_desc,
+                     int queue_idx)
+{
+       const u16 buffer_id = le16_to_cpu(compl_desc->buf_id);
+       const bool eop = compl_desc->end_of_packet != 0;
+       struct gve_rx_buf_state_dqo *buf_state;
+       struct gve_priv *priv = rx->gve;
+       u16 buf_len;
+
+       if (unlikely(buffer_id >= rx->dqo.num_buf_states)) {
+               net_err_ratelimited("%s: Invalid RX buffer_id=%u\n",
+                                   priv->dev->name, buffer_id);
+               return -EINVAL;
+       }
+       buf_state = &rx->dqo.buf_states[buffer_id];
+       if (unlikely(!gve_buf_state_is_allocated(rx, buf_state))) {
+               net_err_ratelimited("%s: RX buffer_id is not allocated: %u\n",
+                                   priv->dev->name, buffer_id);
+               return -EINVAL;
+       }
+
+       if (unlikely(compl_desc->rx_error)) {
+               gve_enqueue_buf_state(rx, &rx->dqo.recycled_buf_states,
+                                     buf_state);
+               return -EINVAL;
+       }
+
+       buf_len = compl_desc->packet_len;
+
+       /* The page might not have been used for a while and was likely last
+        * written by a different thread.
+        */
+       prefetch(buf_state->page_info.page);
+
+       /* Sync the portion of dma buffer for CPU to read. */
+       dma_sync_single_range_for_cpu(&priv->pdev->dev, buf_state->addr,
+                                     buf_state->page_info.page_offset,
+                                     buf_len, DMA_FROM_DEVICE);
+
+       /* Append to current skb if one exists. */
+       if (rx->skb_head) {
+               if (unlikely(gve_rx_append_frags(napi, buf_state, buf_len, rx,
+                                                priv) != 0)) {
+                       goto error;
+               }
+
+               gve_try_recycle_buf(priv, rx, buf_state);
+               return 0;
+       }
+
+       /* Prefetch the payload header. */
+       prefetch((char *)buf_state->addr + buf_state->page_info.page_offset);
+#if L1_CACHE_BYTES < 128
+       prefetch((char *)buf_state->addr + buf_state->page_info.page_offset +
+                L1_CACHE_BYTES);
+#endif
+
+       if (eop && buf_len <= priv->rx_copybreak) {
+               rx->skb_head = gve_rx_copy(priv->dev, napi,
+                                          &buf_state->page_info, buf_len, 0);
+               if (unlikely(!rx->skb_head))
+                       goto error;
+               rx->skb_tail = rx->skb_head;
+
+               u64_stats_update_begin(&rx->statss);
+               rx->rx_copied_pkt++;
+               rx->rx_copybreak_pkt++;
+               u64_stats_update_end(&rx->statss);
+
+               gve_enqueue_buf_state(rx, &rx->dqo.recycled_buf_states,
+                                     buf_state);
+               return 0;
+       }
+
+       rx->skb_head = napi_get_frags(napi);
+       if (unlikely(!rx->skb_head))
+               goto error;
+       rx->skb_tail = rx->skb_head;
+
+       skb_add_rx_frag(rx->skb_head, 0, buf_state->page_info.page,
+                       buf_state->page_info.page_offset, buf_len,
+                       priv->data_buffer_size_dqo);
+       gve_dec_pagecnt_bias(&buf_state->page_info);
+
+       gve_try_recycle_buf(priv, rx, buf_state);
+       return 0;
+
+error:
+       gve_enqueue_buf_state(rx, &rx->dqo.recycled_buf_states, buf_state);
+       return -ENOMEM;
+}
+
+static int gve_rx_complete_rsc(struct sk_buff *skb,
+                              const struct gve_rx_compl_desc_dqo *desc,
+                              struct gve_ptype ptype)
+{
+       struct skb_shared_info *shinfo = skb_shinfo(skb);
+
+       /* Only TCP is supported right now. */
+       if (ptype.l4_type != GVE_L4_TYPE_TCP)
+               return -EINVAL;
+
+       switch (ptype.l3_type) {
+       case GVE_L3_TYPE_IPV4:
+               shinfo->gso_type = SKB_GSO_TCPV4;
+               break;
+       case GVE_L3_TYPE_IPV6:
+               shinfo->gso_type = SKB_GSO_TCPV6;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       shinfo->gso_size = le16_to_cpu(desc->rsc_seg_len);
+       return 0;
+}
+
+/* Returns 0 if skb is completed successfully, -1 otherwise. */
+static int gve_rx_complete_skb(struct gve_rx_ring *rx, struct napi_struct *napi,
+                              const struct gve_rx_compl_desc_dqo *desc,
+                              netdev_features_t feat)
+{
+       struct gve_ptype ptype =
+               rx->gve->ptype_lut_dqo->ptypes[desc->packet_type];
+       int err;
+
+       skb_record_rx_queue(rx->skb_head, rx->q_num);
+
+       if (feat & NETIF_F_RXHASH)
+               gve_rx_skb_hash(rx->skb_head, desc, ptype);
+
+       if (feat & NETIF_F_RXCSUM)
+               gve_rx_skb_csum(rx->skb_head, desc, ptype);
+
+       /* RSC packets must set gso_size, otherwise the TCP stack will
+        * complain that packets are larger than the MTU.
+        */
+        */
+       if (desc->rsc) {
+               err = gve_rx_complete_rsc(rx->skb_head, desc, ptype);
+               if (err < 0)
+                       return err;
+       }
+
+       if (skb_headlen(rx->skb_head) == 0)
+               napi_gro_frags(napi);
+       else
+               napi_gro_receive(napi, rx->skb_head);
+
+       return 0;
+}
+
+int gve_rx_poll_dqo(struct gve_notify_block *block, int budget)
+{
+       struct napi_struct *napi = &block->napi;
+       netdev_features_t feat = napi->dev->features;
+
+       struct gve_rx_ring *rx = block->rx;
+       struct gve_rx_compl_queue_dqo *complq = &rx->dqo.complq;
+
+       u32 work_done = 0;
+       u64 bytes = 0;
+       int err;
+
+       while (work_done < budget) {
+               struct gve_rx_compl_desc_dqo *compl_desc =
+                       &complq->desc_ring[complq->head];
+               u32 pkt_bytes;
+
+               /* No more new packets */
+               if (compl_desc->generation == complq->cur_gen_bit)
+                       break;
+
+               /* Prefetch the next two descriptors. */
+               prefetch(&complq->desc_ring[(complq->head + 1) & complq->mask]);
+               prefetch(&complq->desc_ring[(complq->head + 2) & complq->mask]);
+
+               /* Do not read data until we own the descriptor */
+               dma_rmb();
+
+               err = gve_rx_dqo(napi, rx, compl_desc, rx->q_num);
+               if (err < 0) {
+                       gve_rx_free_skb(rx);
+                       u64_stats_update_begin(&rx->statss);
+                       if (err == -ENOMEM)
+                               rx->rx_skb_alloc_fail++;
+                       else if (err == -EINVAL)
+                               rx->rx_desc_err_dropped_pkt++;
+                       u64_stats_update_end(&rx->statss);
+               }
+
+               complq->head = (complq->head + 1) & complq->mask;
+               complq->num_free_slots++;
+
+               /* When the ring wraps, the generation bit is flipped. */
+               complq->cur_gen_bit ^= (complq->head == 0);
+
+               /* Receiving a completion means we have space to post another
+                * buffer on the buffer queue.
+                */
+               {
+                       struct gve_rx_buf_queue_dqo *bufq = &rx->dqo.bufq;
+
+                       bufq->head = (bufq->head + 1) & bufq->mask;
+               }
+
+               /* Free running counter of completed descriptors */
+               rx->cnt++;
+
+               if (!rx->skb_head)
+                       continue;
+
+               if (!compl_desc->end_of_packet)
+                       continue;
+
+               work_done++;
+               pkt_bytes = rx->skb_head->len;
+               /* The ethernet header (first ETH_HLEN bytes) is snipped off
+                * by eth_type_trans.
+                */
+               if (skb_headlen(rx->skb_head))
+                       pkt_bytes += ETH_HLEN;
+
+               /* gve_rx_complete_skb() will consume skb if successful */
+               if (gve_rx_complete_skb(rx, napi, compl_desc, feat) != 0) {
+                       gve_rx_free_skb(rx);
+                       u64_stats_update_begin(&rx->statss);
+                       rx->rx_desc_err_dropped_pkt++;
+                       u64_stats_update_end(&rx->statss);
+                       continue;
+               }
+
+               bytes += pkt_bytes;
+               rx->skb_head = NULL;
+               rx->skb_tail = NULL;
+       }
+
+       gve_rx_post_buffers_dqo(rx);
+
+       u64_stats_update_begin(&rx->statss);
+       rx->rpackets += work_done;
+       rx->rbytes += bytes;
+       u64_stats_update_end(&rx->statss);
+
+       return work_done;
+}
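
The poll loop above relies on a generation bit rather than a consumer index shared with hardware: the NIC stamps each completion with the generation of its current pass over the ring, a descriptor is new only while its generation differs from cur_gen_bit, and the driver flips its bit every time head wraps. A userspace model of that handshake (ring size illustrative):

        #include <assert.h>
        #include <stdbool.h>

        #define RING 8u

        int main(void)
        {
                bool desc_gen[RING] = {0};  /* ring memory starts zeroed */
                bool cur_gen = 0;
                unsigned int head = 0, consumed = 0;

                /* Pass 1: HW fills every slot with generation 1. */
                for (unsigned int i = 0; i < RING; i++)
                        desc_gen[i] = 1;

                /* Driver consumes while the slot's generation differs. */
                while (desc_gen[head] != cur_gen) {
                        head = (head + 1) % RING;
                        cur_gen ^= (head == 0); /* flip on wrap, as above */
                        consumed++;
                }
                assert(consumed == 8 && cur_gen == 1);

                /* Pass 2: HW wrapped too, so it now writes generation 0. */
                desc_gen[0] = 0;
                desc_gen[1] = 0;
                while (desc_gen[head] != cur_gen) {
                        head = (head + 1) % RING;
                        cur_gen ^= (head == 0);
                        consumed++;
                }
                assert(consumed == 10); /* exactly the new completions */
                return 0;
        }
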
diff --git a/drivers/net/ethernet/google/gve/gve_tx.c b/drivers/net/ethernet/google/gve/gve_tx.c
index 3e04a39..665ac79 100644
@@ -1,11 +1,12 @@
 // SPDX-License-Identifier: (GPL-2.0 OR MIT)
 /* Google virtual Ethernet (gve) driver
  *
- * Copyright (C) 2015-2019 Google, Inc.
+ * Copyright (C) 2015-2021 Google, Inc.
  */
 
 #include "gve.h"
 #include "gve_adminq.h"
+#include "gve_utils.h"
 #include <linux/ip.h>
 #include <linux/tcp.h>
 #include <linux/vmalloc.h>
@@ -131,14 +132,6 @@ static void gve_tx_free_fifo(struct gve_tx_fifo *fifo, size_t bytes)
        atomic_add(bytes, &fifo->available);
 }
 
-static void gve_tx_remove_from_block(struct gve_priv *priv, int queue_idx)
-{
-       struct gve_notify_block *block =
-                       &priv->ntfy_blocks[gve_tx_idx_to_ntfy(priv, queue_idx)];
-
-       block->tx = NULL;
-}
-
 static int gve_clean_tx_done(struct gve_priv *priv, struct gve_tx_ring *tx,
                             u32 to_do, bool try_to_wake);
 
@@ -174,16 +167,6 @@ static void gve_tx_free_ring(struct gve_priv *priv, int idx)
        netif_dbg(priv, drv, priv->dev, "freed tx queue %d\n", idx);
 }
 
-static void gve_tx_add_to_block(struct gve_priv *priv, int queue_idx)
-{
-       int ntfy_idx = gve_tx_idx_to_ntfy(priv, queue_idx);
-       struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
-       struct gve_tx_ring *tx = &priv->tx[queue_idx];
-
-       block->tx = tx;
-       tx->ntfy_id = ntfy_idx;
-}
-
 static int gve_tx_alloc_ring(struct gve_priv *priv, int idx)
 {
        struct gve_tx_ring *tx = &priv->tx[idx];
@@ -208,7 +191,7 @@ static int gve_tx_alloc_ring(struct gve_priv *priv, int idx)
        if (!tx->desc)
                goto abort_with_info;
 
-       tx->raw_addressing = priv->raw_addressing;
+       tx->raw_addressing = priv->queue_format == GVE_GQI_RDA_FORMAT;
        tx->dev = &priv->pdev->dev;
        if (!tx->raw_addressing) {
                tx->tx_fifo.qpl = gve_assign_tx_qpl(priv);
@@ -273,7 +256,7 @@ int gve_tx_alloc_rings(struct gve_priv *priv)
        return err;
 }
 
-void gve_tx_free_rings(struct gve_priv *priv)
+void gve_tx_free_rings_gqi(struct gve_priv *priv)
 {
        int i;
 
diff --git a/drivers/net/ethernet/google/gve/gve_tx_dqo.c b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
new file mode 100644
index 0000000..05ddb6a
--- /dev/null
@@ -0,0 +1,1030 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/* Google virtual Ethernet (gve) driver
+ *
+ * Copyright (C) 2015-2021 Google, Inc.
+ */
+
+#include "gve.h"
+#include "gve_adminq.h"
+#include "gve_utils.h"
+#include "gve_dqo.h"
+#include <linux/tcp.h>
+#include <linux/slab.h>
+#include <linux/skbuff.h>
+
+/* Returns true if a gve_tx_pending_packet_dqo object is available. */
+static bool gve_has_pending_packet(struct gve_tx_ring *tx)
+{
+       /* Check TX path's list. */
+       if (tx->dqo_tx.free_pending_packets != -1)
+               return true;
+
+       /* Check completion handler's list. */
+       if (atomic_read_acquire(&tx->dqo_compl.free_pending_packets) != -1)
+               return true;
+
+       return false;
+}
+
+static struct gve_tx_pending_packet_dqo *
+gve_alloc_pending_packet(struct gve_tx_ring *tx)
+{
+       struct gve_tx_pending_packet_dqo *pending_packet;
+       s16 index;
+
+       index = tx->dqo_tx.free_pending_packets;
+
+       /* No pending_packets available, try to steal the list from the
+        * completion handler.
+        */
+       if (unlikely(index == -1)) {
+               tx->dqo_tx.free_pending_packets =
+                       atomic_xchg(&tx->dqo_compl.free_pending_packets, -1);
+               index = tx->dqo_tx.free_pending_packets;
+
+               if (unlikely(index == -1))
+                       return NULL;
+       }
+
+       pending_packet = &tx->dqo.pending_packets[index];
+
+       /* Remove pending_packet from free list */
+       tx->dqo_tx.free_pending_packets = pending_packet->next;
+       pending_packet->state = GVE_PACKET_STATE_PENDING_DATA_COMPL;
+
+       return pending_packet;
+}
+
+static void
+gve_free_pending_packet(struct gve_tx_ring *tx,
+                       struct gve_tx_pending_packet_dqo *pending_packet)
+{
+       s16 index = pending_packet - tx->dqo.pending_packets;
+
+       pending_packet->state = GVE_PACKET_STATE_UNALLOCATED;
+       while (true) {
+               s16 old_head = atomic_read_acquire(&tx->dqo_compl.free_pending_packets);
+
+               pending_packet->next = old_head;
+               if (atomic_cmpxchg(&tx->dqo_compl.free_pending_packets,
+                                  old_head, index) == old_head) {
+                       break;
+               }
+       }
+}
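
gve_free_pending_packet() can run on the completion path concurrently with the TX path's allocator, so the shared half of the free list is a lock-free LIFO of indices: frees push with a compare-and-swap loop, and gve_alloc_pending_packet() steals the whole list in one atomic_xchg(). A userspace sketch of the same scheme using C11 atomics (illustrative only):

        #include <assert.h>
        #include <stdatomic.h>

        #define N 8

        static short next[N];
        static _Atomic short free_head = -1;

        static void push(short idx)    /* completion path */
        {
                short old = atomic_load(&free_head);

                do {
                        next[idx] = old;
                } while (!atomic_compare_exchange_weak(&free_head, &old, idx));
        }

        static short steal_all(void)   /* TX path takes the whole list */
        {
                return atomic_exchange(&free_head, -1);
        }

        int main(void)
        {
                short head;

                push(3);
                push(5);
                head = steal_all();    /* list is now 5 -> 3 -> -1 */
                assert(head == 5 && next[head] == 3 && next[3] == -1);
                assert(atomic_load(&free_head) == -1);
                return 0;
        }
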
+
+/* gve_tx_clean_pending_packets - Cleans up all pending tx requests and
+ * buffers.
+ */
+static void gve_tx_clean_pending_packets(struct gve_tx_ring *tx)
+{
+       int i;
+
+       for (i = 0; i < tx->dqo.num_pending_packets; i++) {
+               struct gve_tx_pending_packet_dqo *cur_state =
+                       &tx->dqo.pending_packets[i];
+               int j;
+
+               for (j = 0; j < cur_state->num_bufs; j++) {
+                       struct gve_tx_dma_buf *buf = &cur_state->bufs[j];
+
+                       if (j == 0) {
+                               dma_unmap_single(tx->dev,
+                                                dma_unmap_addr(buf, dma),
+                                                dma_unmap_len(buf, len),
+                                                DMA_TO_DEVICE);
+                       } else {
+                               dma_unmap_page(tx->dev,
+                                              dma_unmap_addr(buf, dma),
+                                              dma_unmap_len(buf, len),
+                                              DMA_TO_DEVICE);
+                       }
+               }
+               if (cur_state->skb) {
+                       dev_consume_skb_any(cur_state->skb);
+                       cur_state->skb = NULL;
+               }
+       }
+}
+
+static void gve_tx_free_ring_dqo(struct gve_priv *priv, int idx)
+{
+       struct gve_tx_ring *tx = &priv->tx[idx];
+       struct device *hdev = &priv->pdev->dev;
+       size_t bytes;
+
+       gve_tx_remove_from_block(priv, idx);
+
+       if (tx->q_resources) {
+               dma_free_coherent(hdev, sizeof(*tx->q_resources),
+                                 tx->q_resources, tx->q_resources_bus);
+               tx->q_resources = NULL;
+       }
+
+       if (tx->dqo.compl_ring) {
+               bytes = sizeof(tx->dqo.compl_ring[0]) *
+                       (tx->dqo.complq_mask + 1);
+               dma_free_coherent(hdev, bytes, tx->dqo.compl_ring,
+                                 tx->complq_bus_dqo);
+               tx->dqo.compl_ring = NULL;
+       }
+
+       if (tx->dqo.tx_ring) {
+               bytes = sizeof(tx->dqo.tx_ring[0]) * (tx->mask + 1);
+               dma_free_coherent(hdev, bytes, tx->dqo.tx_ring, tx->bus);
+               tx->dqo.tx_ring = NULL;
+       }
+
+       kvfree(tx->dqo.pending_packets);
+       tx->dqo.pending_packets = NULL;
+
+       netif_dbg(priv, drv, priv->dev, "freed tx queue %d\n", idx);
+}
+
+static int gve_tx_alloc_ring_dqo(struct gve_priv *priv, int idx)
+{
+       struct gve_tx_ring *tx = &priv->tx[idx];
+       struct device *hdev = &priv->pdev->dev;
+       int num_pending_packets;
+       size_t bytes;
+       int i;
+
+       memset(tx, 0, sizeof(*tx));
+       tx->q_num = idx;
+       tx->dev = &priv->pdev->dev;
+       tx->netdev_txq = netdev_get_tx_queue(priv->dev, idx);
+       atomic_set_release(&tx->dqo_compl.hw_tx_head, 0);
+
+       /* Queue sizes must be a power of 2 */
+       tx->mask = priv->tx_desc_cnt - 1;
+       tx->dqo.complq_mask = priv->options_dqo_rda.tx_comp_ring_entries - 1;
+
+       /* The max number of pending packets determines the maximum number of
+        * descriptors which may be written to the completion queue.
+        *
+        * We must set the number small enough to make sure we never overrun the
+        * completion queue.
+        */
+       num_pending_packets = tx->dqo.complq_mask + 1;
+
+       /* Reserve space for descriptor completions, which will be reported at
+        * most every GVE_TX_MIN_RE_INTERVAL packets.
+        */
+       num_pending_packets -=
+               (tx->dqo.complq_mask + 1) / GVE_TX_MIN_RE_INTERVAL;
+
+       /* Each packet may have at most 2 buffer completions if it receives both
+        * a miss and reinjection completion.
+        */
+       num_pending_packets /= 2;
+
+       tx->dqo.num_pending_packets = min_t(int, num_pending_packets, S16_MAX);
+       tx->dqo.pending_packets = kvcalloc(tx->dqo.num_pending_packets,
+                                          sizeof(tx->dqo.pending_packets[0]),
+                                          GFP_KERNEL);
+       if (!tx->dqo.pending_packets)
+               goto err;
+
+       /* Set up linked list of pending packets */
+       for (i = 0; i < tx->dqo.num_pending_packets - 1; i++)
+               tx->dqo.pending_packets[i].next = i + 1;
+
+       tx->dqo.pending_packets[tx->dqo.num_pending_packets - 1].next = -1;
+       atomic_set_release(&tx->dqo_compl.free_pending_packets, -1);
+       tx->dqo_compl.miss_completions.head = -1;
+       tx->dqo_compl.miss_completions.tail = -1;
+       tx->dqo_compl.timed_out_completions.head = -1;
+       tx->dqo_compl.timed_out_completions.tail = -1;
+
+       bytes = sizeof(tx->dqo.tx_ring[0]) * (tx->mask + 1);
+       tx->dqo.tx_ring = dma_alloc_coherent(hdev, bytes, &tx->bus, GFP_KERNEL);
+       if (!tx->dqo.tx_ring)
+               goto err;
+
+       bytes = sizeof(tx->dqo.compl_ring[0]) * (tx->dqo.complq_mask + 1);
+       tx->dqo.compl_ring = dma_alloc_coherent(hdev, bytes,
+                                               &tx->complq_bus_dqo,
+                                               GFP_KERNEL);
+       if (!tx->dqo.compl_ring)
+               goto err;
+
+       tx->q_resources = dma_alloc_coherent(hdev, sizeof(*tx->q_resources),
+                                            &tx->q_resources_bus, GFP_KERNEL);
+       if (!tx->q_resources)
+               goto err;
+
+       gve_tx_add_to_block(priv, idx);
+
+       return 0;
+
+err:
+       gve_tx_free_ring_dqo(priv, idx);
+       return -ENOMEM;
+}
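
To make the sizing above concrete: with a hypothetical completion ring of 1024 entries, and assuming GVE_TX_MIN_RE_INTERVAL is 32 (the real value lives in the driver headers; both numbers here are illustrative), the budget works out to (1024 - 1024/32) / 2 = 496 pending packets:

        #include <assert.h>

        int main(void)
        {
                int complq_slots = 1024;  /* hypothetical ring size */
                int re_interval  = 32;    /* assumed RE interval */
                int n = complq_slots;

                n -= complq_slots / re_interval; /* descriptor completions */
                n /= 2;                   /* miss + reinjection per packet */

                assert(n == 496);         /* completion queue never overruns */
                return 0;
        }
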
+
+int gve_tx_alloc_rings_dqo(struct gve_priv *priv)
+{
+       int err = 0;
+       int i;
+
+       for (i = 0; i < priv->tx_cfg.num_queues; i++) {
+               err = gve_tx_alloc_ring_dqo(priv, i);
+               if (err) {
+                       netif_err(priv, drv, priv->dev,
+                                 "Failed to alloc tx ring=%d: err=%d\n",
+                                 i, err);
+                       goto err;
+               }
+       }
+
+       return 0;
+
+err:
+       for (i--; i >= 0; i--)
+               gve_tx_free_ring_dqo(priv, i);
+
+       return err;
+}
+
+void gve_tx_free_rings_dqo(struct gve_priv *priv)
+{
+       int i;
+
+       for (i = 0; i < priv->tx_cfg.num_queues; i++) {
+               struct gve_tx_ring *tx = &priv->tx[i];
+
+               gve_clean_tx_done_dqo(priv, tx, /*napi=*/NULL);
+               netdev_tx_reset_queue(tx->netdev_txq);
+               gve_tx_clean_pending_packets(tx);
+
+               gve_tx_free_ring_dqo(priv, i);
+       }
+}
+
+/* Returns the number of slots available in the ring */
+static u32 num_avail_tx_slots(const struct gve_tx_ring *tx)
+{
+       u32 num_used = (tx->dqo_tx.tail - tx->dqo_tx.head) & tx->mask;
+
+       return tx->mask - num_used;
+}
+
+/* Stops the queue if the number of available descriptors is less than 'count'.
+ * Return: 0 if stop is not required.
+ */
+static int gve_maybe_stop_tx_dqo(struct gve_tx_ring *tx, int count)
+{
+       if (likely(gve_has_pending_packet(tx) &&
+                  num_avail_tx_slots(tx) >= count))
+               return 0;
+
+       /* Update cached TX head pointer */
+       tx->dqo_tx.head = atomic_read_acquire(&tx->dqo_compl.hw_tx_head);
+
+       if (likely(gve_has_pending_packet(tx) &&
+                  num_avail_tx_slots(tx) >= count))
+               return 0;
+
+       /* No space, so stop the queue */
+       tx->stop_queue++;
+       netif_tx_stop_queue(tx->netdev_txq);
+
+       /* Sync with restarting queue in `gve_tx_poll_dqo()` */
+       mb();
+
+       /* After stopping the queue, check if we can transmit again in order
+        * to avoid a TOCTOU race.
+        */
+        */
+       tx->dqo_tx.head = atomic_read_acquire(&tx->dqo_compl.hw_tx_head);
+
+       if (likely(!gve_has_pending_packet(tx) ||
+                  num_avail_tx_slots(tx) < count))
+               return -EBUSY;
+
+       netif_tx_start_queue(tx->netdev_txq);
+       tx->wake_queue++;
+       return 0;
+}
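
The stop/re-check sequence above is the usual defense against a lost wakeup: a completion may free descriptors between the first check and netif_tx_stop_queue(), so after the barrier the function re-reads the head and restarts the queue itself if space appeared. A condensed, single-threaded sketch of the control flow (all names are hypothetical stand-ins for the driver's primitives):

        #include <stdbool.h>
        #include <stdio.h>

        static bool queue_stopped;       /* hypothetical queue state */
        static int  avail;               /* descriptors currently free */

        static int maybe_stop(int need)
        {
                if (avail >= need)
                        return 0;        /* fast path: enough space */

                queue_stopped = true;
                __sync_synchronize();    /* stand-in for the driver's mb() */

                /* Re-check: a completion may have freed space meanwhile. */
                if (avail < need)
                        return -1;       /* stay stopped; wake comes later */

                queue_stopped = false;   /* undo the stop ourselves */
                return 0;
        }

        int main(void)
        {
                avail = 3;
                printf("%d stopped=%d\n", maybe_stop(2), queue_stopped);
                avail = 0;
                printf("%d stopped=%d\n", maybe_stop(2), queue_stopped);
                return 0;
        }
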
+
+static void gve_extract_tx_metadata_dqo(const struct sk_buff *skb,
+                                       struct gve_tx_metadata_dqo *metadata)
+{
+       memset(metadata, 0, sizeof(*metadata));
+       metadata->version = GVE_TX_METADATA_VERSION_DQO;
+
+       if (skb->l4_hash) {
+               u16 path_hash = skb->hash ^ (skb->hash >> 16);
+
+               path_hash &= (1 << 15) - 1;
+               if (unlikely(path_hash == 0))
+                       path_hash = ~path_hash;
+
+               metadata->path_hash = path_hash;
+       }
+}
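
The path hash above folds the 32-bit flow hash into 15 bits and reserves 0 (which would read as "no hash"), hence the final remap of a zero result. A quick check of that arithmetic:

        #include <assert.h>
        #include <stdint.h>

        static uint16_t fold_path_hash(uint32_t hash)
        {
                uint16_t ph = (uint16_t)(hash ^ (hash >> 16)); /* 32 -> 16 */

                ph &= (1u << 15) - 1;          /* keep 15 bits */
                if (ph == 0)
                        ph = (uint16_t)~ph;    /* reserve 0 */
                return ph;
        }

        int main(void)
        {
                assert(fold_path_hash(0xdeadbeefu) ==
                       ((0xdead ^ 0xbeef) & 0x7fff));
                assert(fold_path_hash(0) != 0);  /* zero never escapes */
                assert(fold_path_hash(0x80008000u) != 0); /* fold hit 0 */
                return 0;
        }
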
+
+static void gve_tx_fill_pkt_desc_dqo(struct gve_tx_ring *tx, u32 *desc_idx,
+                                    struct sk_buff *skb, u32 len, u64 addr,
+                                    s16 compl_tag, bool eop, bool is_gso)
+{
+       const bool checksum_offload_en = skb->ip_summed == CHECKSUM_PARTIAL;
+
+       while (len > 0) {
+               struct gve_tx_pkt_desc_dqo *desc =
+                       &tx->dqo.tx_ring[*desc_idx].pkt;
+               u32 cur_len = min_t(u32, len, GVE_TX_MAX_BUF_SIZE_DQO);
+               bool cur_eop = eop && cur_len == len;
+
+               *desc = (struct gve_tx_pkt_desc_dqo){
+                       .buf_addr = cpu_to_le64(addr),
+                       .dtype = GVE_TX_PKT_DESC_DTYPE_DQO,
+                       .end_of_packet = cur_eop,
+                       .checksum_offload_enable = checksum_offload_en,
+                       .compl_tag = cpu_to_le16(compl_tag),
+                       .buf_size = cur_len,
+               };
+
+               addr += cur_len;
+               len -= cur_len;
+               *desc_idx = (*desc_idx + 1) & tx->mask;
+       }
+}
+
+/* Validates and prepares `skb` for TSO.
+ *
+ * Returns header length, or < 0 if invalid.
+ */
+static int gve_prep_tso(struct sk_buff *skb)
+{
+       struct tcphdr *tcp;
+       int header_len;
+       u32 paylen;
+       int err;
+
+       /* Note: HW requires MSS (gso_size) to be <= 9728 and the total length
+        * of the TSO to be <= 262143.
+        *
+        * However, we don't validate these because:
+        * - Hypervisor enforces a limit of 9K MTU
+        * - Kernel will not produce a TSO larger than 64k
+        */
+
+       if (unlikely(skb_shinfo(skb)->gso_size < GVE_TX_MIN_TSO_MSS_DQO))
+               return -1;
+
+       /* Needed because we will modify header. */
+       err = skb_cow_head(skb, 0);
+       if (err < 0)
+               return err;
+
+       tcp = tcp_hdr(skb);
+
+       /* Remove payload length from checksum. */
+       paylen = skb->len - skb_transport_offset(skb);
+
+       switch (skb_shinfo(skb)->gso_type) {
+       case SKB_GSO_TCPV4:
+       case SKB_GSO_TCPV6:
+               csum_replace_by_diff(&tcp->check,
+                                    (__force __wsum)htonl(paylen));
+
+               /* Compute length of segmentation header. */
+               header_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       if (unlikely(header_len > GVE_TX_MAX_HDR_SIZE_DQO))
+               return -EINVAL;
+
+       return header_len;
+}
+
+static void gve_tx_fill_tso_ctx_desc(struct gve_tx_tso_context_desc_dqo *desc,
+                                    const struct sk_buff *skb,
+                                    const struct gve_tx_metadata_dqo *metadata,
+                                    int header_len)
+{
+       *desc = (struct gve_tx_tso_context_desc_dqo){
+               .header_len = header_len,
+               .cmd_dtype = {
+                       .dtype = GVE_TX_TSO_CTX_DESC_DTYPE_DQO,
+                       .tso = 1,
+               },
+               .flex0 = metadata->bytes[0],
+               .flex5 = metadata->bytes[5],
+               .flex6 = metadata->bytes[6],
+               .flex7 = metadata->bytes[7],
+               .flex8 = metadata->bytes[8],
+               .flex9 = metadata->bytes[9],
+               .flex10 = metadata->bytes[10],
+               .flex11 = metadata->bytes[11],
+       };
+       desc->tso_total_len = skb->len - header_len;
+       desc->mss = skb_shinfo(skb)->gso_size;
+}
+
+static void
+gve_tx_fill_general_ctx_desc(struct gve_tx_general_context_desc_dqo *desc,
+                            const struct gve_tx_metadata_dqo *metadata)
+{
+       *desc = (struct gve_tx_general_context_desc_dqo){
+               .flex0 = metadata->bytes[0],
+               .flex1 = metadata->bytes[1],
+               .flex2 = metadata->bytes[2],
+               .flex3 = metadata->bytes[3],
+               .flex4 = metadata->bytes[4],
+               .flex5 = metadata->bytes[5],
+               .flex6 = metadata->bytes[6],
+               .flex7 = metadata->bytes[7],
+               .flex8 = metadata->bytes[8],
+               .flex9 = metadata->bytes[9],
+               .flex10 = metadata->bytes[10],
+               .flex11 = metadata->bytes[11],
+               .cmd_dtype = {.dtype = GVE_TX_GENERAL_CTX_DESC_DTYPE_DQO},
+       };
+}
+
+/* Returns 0 on success, or < 0 on error.
+ *
+ * Before this function is called, the caller must ensure
+ * gve_has_pending_packet(tx) returns true.
+ */
+static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx,
+                                     struct sk_buff *skb)
+{
+       const struct skb_shared_info *shinfo = skb_shinfo(skb);
+       const bool is_gso = skb_is_gso(skb);
+       u32 desc_idx = tx->dqo_tx.tail;
+
+       struct gve_tx_pending_packet_dqo *pending_packet;
+       struct gve_tx_metadata_dqo metadata;
+       s16 completion_tag;
+       int i;
+
+       pending_packet = gve_alloc_pending_packet(tx);
+       pending_packet->skb = skb;
+       pending_packet->num_bufs = 0;
+       completion_tag = pending_packet - tx->dqo.pending_packets;
+
+       gve_extract_tx_metadata_dqo(skb, &metadata);
+       if (is_gso) {
+               int header_len = gve_prep_tso(skb);
+
+               if (unlikely(header_len < 0))
+                       goto err;
+
+               gve_tx_fill_tso_ctx_desc(&tx->dqo.tx_ring[desc_idx].tso_ctx,
+                                        skb, &metadata, header_len);
+               desc_idx = (desc_idx + 1) & tx->mask;
+       }
+
+       gve_tx_fill_general_ctx_desc(&tx->dqo.tx_ring[desc_idx].general_ctx,
+                                    &metadata);
+       desc_idx = (desc_idx + 1) & tx->mask;
+
+       /* Note: HW requires that the size of a non-TSO packet be within the
+        * range of [17, 9728].
+        *
+        * We don't need to double-check because:
+        * - We limited `netdev->min_mtu` to ETH_MIN_MTU.
+        * - Hypervisor won't allow MTU larger than 9216.
+        */
+
+       /* Map the linear portion of skb */
+       {
+               struct gve_tx_dma_buf *buf =
+                       &pending_packet->bufs[pending_packet->num_bufs];
+               u32 len = skb_headlen(skb);
+               dma_addr_t addr;
+
+               addr = dma_map_single(tx->dev, skb->data, len, DMA_TO_DEVICE);
+               if (unlikely(dma_mapping_error(tx->dev, addr)))
+                       goto err;
+
+               dma_unmap_len_set(buf, len, len);
+               dma_unmap_addr_set(buf, dma, addr);
+               ++pending_packet->num_bufs;
+
+               gve_tx_fill_pkt_desc_dqo(tx, &desc_idx, skb, len, addr,
+                                        completion_tag,
+                                        /*eop=*/shinfo->nr_frags == 0, is_gso);
+       }
+
+       for (i = 0; i < shinfo->nr_frags; i++) {
+               struct gve_tx_dma_buf *buf =
+                       &pending_packet->bufs[pending_packet->num_bufs];
+               const skb_frag_t *frag = &shinfo->frags[i];
+               bool is_eop = i == (shinfo->nr_frags - 1);
+               u32 len = skb_frag_size(frag);
+               dma_addr_t addr;
+
+               addr = skb_frag_dma_map(tx->dev, frag, 0, len, DMA_TO_DEVICE);
+               if (unlikely(dma_mapping_error(tx->dev, addr)))
+                       goto err;
+
+               dma_unmap_len_set(buf, len, len);
+               dma_unmap_addr_set(buf, dma, addr);
+               ++pending_packet->num_bufs;
+
+               gve_tx_fill_pkt_desc_dqo(tx, &desc_idx, skb, len, addr,
+                                        completion_tag, is_eop, is_gso);
+       }
+
+       /* Commit the changes to our state */
+       tx->dqo_tx.tail = desc_idx;
+
+       /* Request a descriptor completion on the last descriptor of the
+        * packet if we are allowed to by the HW enforced interval.
+        */
+       {
+               u32 last_desc_idx = (desc_idx - 1) & tx->mask;
+               u32 last_report_event_interval =
+                       (last_desc_idx - tx->dqo_tx.last_re_idx) & tx->mask;
+
+               if (unlikely(last_report_event_interval >=
+                            GVE_TX_MIN_RE_INTERVAL)) {
+                       tx->dqo.tx_ring[last_desc_idx].pkt.report_event = true;
+                       tx->dqo_tx.last_re_idx = last_desc_idx;
+               }
+       }
+
+       return 0;
+
+err:
+       for (i = 0; i < pending_packet->num_bufs; i++) {
+               struct gve_tx_dma_buf *buf = &pending_packet->bufs[i];
+
+               if (i == 0) {
+                       dma_unmap_single(tx->dev, dma_unmap_addr(buf, dma),
+                                        dma_unmap_len(buf, len),
+                                        DMA_TO_DEVICE);
+               } else {
+                       dma_unmap_page(tx->dev, dma_unmap_addr(buf, dma),
+                                      dma_unmap_len(buf, len), DMA_TO_DEVICE);
+               }
+       }
+
+       pending_packet->skb = NULL;
+       pending_packet->num_bufs = 0;
+       gve_free_pending_packet(tx, pending_packet);
+
+       return -1;
+}
+
+static int gve_num_descs_per_buf(size_t size)
+{
+       return DIV_ROUND_UP(size, GVE_TX_MAX_BUF_SIZE_DQO);
+}
+
+static int gve_num_buffer_descs_needed(const struct sk_buff *skb)
+{
+       const struct skb_shared_info *shinfo = skb_shinfo(skb);
+       int num_descs;
+       int i;
+
+       num_descs = gve_num_descs_per_buf(skb_headlen(skb));
+
+       for (i = 0; i < shinfo->nr_frags; i++) {
+               unsigned int frag_size = skb_frag_size(&shinfo->frags[i]);
+
+               num_descs += gve_num_descs_per_buf(frag_size);
+       }
+
+       return num_descs;
+}
+
+/* Returns true if HW is capable of sending TSO represented by `skb`.
+ *
+ * Each segment must not span more than GVE_TX_MAX_DATA_DESCS buffers.
+ * - The header is counted as one buffer for every single segment.
+ * - A buffer which is split between two segments is counted for both.
+ * - If a buffer contains both header and payload, it is counted as two buffers.
+ */
+static bool gve_can_send_tso(const struct sk_buff *skb)
+{
+       const int header_len = skb_checksum_start_offset(skb) + tcp_hdrlen(skb);
+       const int max_bufs_per_seg = GVE_TX_MAX_DATA_DESCS - 1;
+       const struct skb_shared_info *shinfo = skb_shinfo(skb);
+       const int gso_size = shinfo->gso_size;
+       int cur_seg_num_bufs;
+       int cur_seg_size;
+       int i;
+
+       cur_seg_size = skb_headlen(skb) - header_len;
+       cur_seg_num_bufs = cur_seg_size > 0;
+
+       for (i = 0; i < shinfo->nr_frags; i++) {
+               if (cur_seg_size >= gso_size) {
+                       cur_seg_size %= gso_size;
+                       cur_seg_num_bufs = cur_seg_size > 0;
+               }
+
+               if (unlikely(++cur_seg_num_bufs > max_bufs_per_seg))
+                       return false;
+
+               cur_seg_size += skb_frag_size(&shinfo->frags[i]);
+       }
+
+       return true;
+}
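
The loop above replays device segmentation in software: it slices the payload into gso_size segments and counts how many buffers each segment touches, where a frag spanning a segment boundary counts toward both segments. A userspace replay on two sample frag layouts (the per-segment limit here is a hypothetical stand-in for GVE_TX_MAX_DATA_DESCS - 1):

        #include <assert.h>
        #include <stddef.h>

        #define MAX_BUFS_PER_SEG 9       /* assumed limit, illustrative */

        static int can_send_tso(int head_payload, const int *frags,
                                size_t n, int gso_size)
        {
                int cur_seg_size = head_payload;
                int cur_seg_num_bufs = cur_seg_size > 0;
                size_t i;

                for (i = 0; i < n; i++) {
                        if (cur_seg_size >= gso_size) {
                                cur_seg_size %= gso_size;
                                cur_seg_num_bufs = cur_seg_size > 0;
                        }
                        if (++cur_seg_num_bufs > MAX_BUFS_PER_SEG)
                                return 0;
                        cur_seg_size += frags[i];
                }
                return 1;
        }

        int main(void)
        {
                /* 1448-byte MSS, tiny frags: one segment spans too many. */
                int tiny[32], i;
                int big[4] = { 16384, 16384, 16384, 16384 };

                for (i = 0; i < 32; i++)
                        tiny[i] = 128;
                assert(!can_send_tso(0, tiny, 32, 1448));

                /* Large frags: at most two buffers per segment. */
                assert(can_send_tso(0, big, 4, 1448));
                return 0;
        }
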
+
+/* Attempt to transmit the specified SKB.
+ *
+ * Returns 0 if the SKB was transmitted or dropped.
+ * Returns -1 if there is not currently enough space to transmit the SKB.
+ */
+static int gve_try_tx_skb(struct gve_priv *priv, struct gve_tx_ring *tx,
+                         struct sk_buff *skb)
+{
+       int num_buffer_descs;
+       int total_num_descs;
+
+       if (skb_is_gso(skb)) {
+               /* If TSO doesn't meet HW requirements, attempt to linearize the
+                * packet.
+                */
+               if (unlikely(!gve_can_send_tso(skb) &&
+                            skb_linearize(skb) < 0)) {
+                       net_err_ratelimited("%s: Failed to transmit TSO packet\n",
+                                           priv->dev->name);
+                       goto drop;
+               }
+
+               num_buffer_descs = gve_num_buffer_descs_needed(skb);
+       } else {
+               num_buffer_descs = gve_num_buffer_descs_needed(skb);
+
+               if (unlikely(num_buffer_descs > GVE_TX_MAX_DATA_DESCS)) {
+                       if (unlikely(skb_linearize(skb) < 0))
+                               goto drop;
+
+                       num_buffer_descs = 1;
+               }
+       }
+
+       /* Metadata + (optional TSO) + data descriptors. */
+       total_num_descs = 1 + skb_is_gso(skb) + num_buffer_descs;
+       if (unlikely(gve_maybe_stop_tx_dqo(tx, total_num_descs +
+                       GVE_TX_MIN_DESC_PREVENT_CACHE_OVERLAP))) {
+               return -1;
+       }
+
+       if (unlikely(gve_tx_add_skb_no_copy_dqo(tx, skb) < 0))
+               goto drop;
+
+       netdev_tx_sent_queue(tx->netdev_txq, skb->len);
+       skb_tx_timestamp(skb);
+       return 0;
+
+drop:
+       tx->dropped_pkt++;
+       dev_kfree_skb_any(skb);
+       return 0;
+}
+
+/* Transmit a given skb and ring the doorbell. */
+netdev_tx_t gve_tx_dqo(struct sk_buff *skb, struct net_device *dev)
+{
+       struct gve_priv *priv = netdev_priv(dev);
+       struct gve_tx_ring *tx;
+
+       tx = &priv->tx[skb_get_queue_mapping(skb)];
+       if (unlikely(gve_try_tx_skb(priv, tx, skb) < 0)) {
+               /* We need to ring the txq doorbell -- we have stopped the Tx
+                * queue for want of resources, but prior calls to gve_tx()
+                * may have added descriptors without ringing the doorbell.
+                */
+               gve_tx_put_doorbell_dqo(priv, tx->q_resources, tx->dqo_tx.tail);
+               return NETDEV_TX_BUSY;
+       }
+
+       if (!netif_xmit_stopped(tx->netdev_txq) && netdev_xmit_more())
+               return NETDEV_TX_OK;
+
+       gve_tx_put_doorbell_dqo(priv, tx->q_resources, tx->dqo_tx.tail);
+       return NETDEV_TX_OK;
+}
+
+static void add_to_list(struct gve_tx_ring *tx, struct gve_index_list *list,
+                       struct gve_tx_pending_packet_dqo *pending_packet)
+{
+       s16 old_tail, index;
+
+       index = pending_packet - tx->dqo.pending_packets;
+       old_tail = list->tail;
+       list->tail = index;
+       if (old_tail == -1)
+               list->head = index;
+       else
+               tx->dqo.pending_packets[old_tail].next = index;
+
+       pending_packet->next = -1;
+       pending_packet->prev = old_tail;
+}
+
+static void remove_from_list(struct gve_tx_ring *tx,
+                            struct gve_index_list *list,
+                            struct gve_tx_pending_packet_dqo *pending_packet)
+{
+       s16 prev_index, next_index;
+
+       prev_index = pending_packet->prev;
+       next_index = pending_packet->next;
+
+       if (prev_index == -1) {
+               /* Node is head */
+               list->head = next_index;
+       } else {
+               tx->dqo.pending_packets[prev_index].next = next_index;
+       }
+       if (next_index == -1) {
+               /* Node is tail */
+               list->tail = prev_index;
+       } else {
+               tx->dqo.pending_packets[next_index].prev = prev_index;
+       }
+}
+
+static void gve_unmap_packet(struct device *dev,
+                            struct gve_tx_pending_packet_dqo *pending_packet)
+{
+       struct gve_tx_dma_buf *buf;
+       int i;
+
+       /* SKB linear portion is guaranteed to be mapped */
+       buf = &pending_packet->bufs[0];
+       dma_unmap_single(dev, dma_unmap_addr(buf, dma),
+                        dma_unmap_len(buf, len), DMA_TO_DEVICE);
+       for (i = 1; i < pending_packet->num_bufs; i++) {
+               buf = &pending_packet->bufs[i];
+               dma_unmap_page(dev, dma_unmap_addr(buf, dma),
+                              dma_unmap_len(buf, len), DMA_TO_DEVICE);
+       }
+       pending_packet->num_bufs = 0;
+}
+
+/* Completion types and expected behavior:
+ * No Miss compl + Packet compl = Packet completed normally.
+ * Miss compl + Re-inject compl = Packet completed normally.
+ * No Miss compl + Re-inject compl = Skipped, i.e. packet not completed.
+ * Miss compl + Packet compl = Skipped, i.e. packet not completed.
+ */
+static void gve_handle_packet_completion(struct gve_priv *priv,
+                                        struct gve_tx_ring *tx, bool is_napi,
+                                        u16 compl_tag, u64 *bytes, u64 *pkts,
+                                        bool is_reinjection)
+{
+       struct gve_tx_pending_packet_dqo *pending_packet;
+
+       if (unlikely(compl_tag >= tx->dqo.num_pending_packets)) {
+               net_err_ratelimited("%s: Invalid TX completion tag: %d\n",
+                                   priv->dev->name, (int)compl_tag);
+               return;
+       }
+
+       pending_packet = &tx->dqo.pending_packets[compl_tag];
+
+       if (unlikely(is_reinjection)) {
+               if (unlikely(pending_packet->state ==
+                            GVE_PACKET_STATE_TIMED_OUT_COMPL)) {
+                       net_err_ratelimited("%s: Re-injection completion: %d received after timeout.\n",
+                                           priv->dev->name, (int)compl_tag);
+                       /* Packet was already completed as a result of timeout,
+                        * so just remove from list and free pending packet.
+                        */
+                       remove_from_list(tx,
+                                        &tx->dqo_compl.timed_out_completions,
+                                        pending_packet);
+                       gve_free_pending_packet(tx, pending_packet);
+                       return;
+               }
+               if (unlikely(pending_packet->state !=
+                            GVE_PACKET_STATE_PENDING_REINJECT_COMPL)) {
+                       /* An allocated packet with no outstanding miss
+                        * completion implies that the packet received a
+                        * re-injection completion without a prior miss
+                        * completion. Return without completing the packet.
+                        */
+                       net_err_ratelimited("%s: Re-injection completion received without corresponding miss completion: %d\n",
+                                           priv->dev->name, (int)compl_tag);
+                       return;
+               }
+               remove_from_list(tx, &tx->dqo_compl.miss_completions,
+                                pending_packet);
+       } else {
+               /* Packet is allocated but not a pending data completion. */
+               if (unlikely(pending_packet->state !=
+                            GVE_PACKET_STATE_PENDING_DATA_COMPL)) {
+                       net_err_ratelimited("%s: No pending data completion: %d\n",
+                                           priv->dev->name, (int)compl_tag);
+                       return;
+               }
+       }
+       gve_unmap_packet(tx->dev, pending_packet);
+
+       *bytes += pending_packet->skb->len;
+       (*pkts)++;
+       napi_consume_skb(pending_packet->skb, is_napi);
+       pending_packet->skb = NULL;
+       gve_free_pending_packet(tx, pending_packet);
+}
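
The four completion combinations listed before gve_handle_packet_completion() reduce to a small state machine keyed on the pending packet's state. A condensed sketch of those transitions (state names shortened, illustrative only):

        #include <assert.h>

        enum state { PENDING_DATA, PENDING_REINJECT, TIMED_OUT, FREED };

        /* Returns 1 if the skb should be completed to the stack now. */
        static int on_completion(enum state *s, int is_reinjection)
        {
                if (is_reinjection) {
                        if (*s == TIMED_OUT) {  /* already dropped */
                                *s = FREED;
                                return 0;
                        }
                        if (*s != PENDING_REINJECT) /* no prior miss compl */
                                return 0;
                } else if (*s != PENDING_DATA) { /* miss + packet compl */
                        return 0;
                }
                *s = FREED;
                return 1;
        }

        int main(void)
        {
                enum state s = PENDING_DATA;

                assert(on_completion(&s, 0) == 1); /* normal completion */

                s = PENDING_REINJECT;              /* miss compl seen */
                assert(on_completion(&s, 1) == 1); /* reinjection completes */

                s = PENDING_REINJECT;
                assert(on_completion(&s, 0) == 0); /* packet compl skipped */
                return 0;
        }
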
+
+static void gve_handle_miss_completion(struct gve_priv *priv,
+                                      struct gve_tx_ring *tx, u16 compl_tag,
+                                      u64 *bytes, u64 *pkts)
+{
+       struct gve_tx_pending_packet_dqo *pending_packet;
+
+       if (unlikely(compl_tag >= tx->dqo.num_pending_packets)) {
+               net_err_ratelimited("%s: Invalid TX completion tag: %d\n",
+                                   priv->dev->name, (int)compl_tag);
+               return;
+       }
+
+       pending_packet = &tx->dqo.pending_packets[compl_tag];
+       if (unlikely(pending_packet->state !=
+                               GVE_PACKET_STATE_PENDING_DATA_COMPL)) {
+               net_err_ratelimited("%s: Unexpected packet state: %d for completion tag: %d\n",
+                                   priv->dev->name, (int)pending_packet->state,
+                                   (int)compl_tag);
+               return;
+       }
+
+       pending_packet->state = GVE_PACKET_STATE_PENDING_REINJECT_COMPL;
+       /* jiffies can wrap around, but the time comparison helpers handle overflow. */
+       pending_packet->timeout_jiffies =
+                       jiffies +
+                       msecs_to_jiffies(GVE_REINJECT_COMPL_TIMEOUT *
+                                        MSEC_PER_SEC);
+       add_to_list(tx, &tx->dqo_compl.miss_completions, pending_packet);
+
+       *bytes += pending_packet->skb->len;
+       (*pkts)++;
+}
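
The miss-completion path above arms a deadline in jiffies; the reaper functions below test it with time_is_after_jiffies(), which compares with wrap-safe signed arithmetic. A minimal sketch of that pattern, with a hypothetical helper name:

#include <linux/jiffies.h>
#include <linux/types.h>

/* Illustration only: arm a deadline and test it the way the reaper
 * functions below do. time_is_after_jiffies(d) stays true while d is
 * still in the future, even across a jiffies wraparound.
 */
static bool example_deadline_pending(unsigned long timeout_ms)
{
	unsigned long deadline = jiffies + msecs_to_jiffies(timeout_ms);

	return time_is_after_jiffies(deadline);
}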
+
+static void remove_miss_completions(struct gve_priv *priv,
+                                   struct gve_tx_ring *tx)
+{
+       struct gve_tx_pending_packet_dqo *pending_packet;
+       s16 next_index;
+
+       next_index = tx->dqo_compl.miss_completions.head;
+       while (next_index != -1) {
+               pending_packet = &tx->dqo.pending_packets[next_index];
+               next_index = pending_packet->next;
+               /* Break early because packets should time out in order. */
+               if (time_is_after_jiffies(pending_packet->timeout_jiffies))
+                       break;
+
+               remove_from_list(tx, &tx->dqo_compl.miss_completions,
+                                pending_packet);
+               /* Unmap buffers and free the skb, but do not free the pending
+                * packet itself: its completion tag stays allocated so that
+                * the driver can take appropriate action if a corresponding
+                * valid completion is received later.
+                */
+               gve_unmap_packet(tx->dev, pending_packet);
+               /* This indicates the packet was dropped. */
+               dev_kfree_skb_any(pending_packet->skb);
+               pending_packet->skb = NULL;
+               tx->dropped_pkt++;
+               net_err_ratelimited("%s: No reinjection completion was received for: %d.\n",
+                                   priv->dev->name,
+                                   (int)(pending_packet - tx->dqo.pending_packets));
+
+               pending_packet->state = GVE_PACKET_STATE_TIMED_OUT_COMPL;
+               pending_packet->timeout_jiffies =
+                               jiffies +
+                               msecs_to_jiffies(GVE_DEALLOCATE_COMPL_TIMEOUT *
+                                                MSEC_PER_SEC);
+               /* Maintain pending packet in another list so the packet can be
+                * unallocated at a later time.
+                */
+               add_to_list(tx, &tx->dqo_compl.timed_out_completions,
+                           pending_packet);
+       }
+}
+
+static void remove_timed_out_completions(struct gve_priv *priv,
+                                        struct gve_tx_ring *tx)
+{
+       struct gve_tx_pending_packet_dqo *pending_packet;
+       s16 next_index;
+
+       next_index = tx->dqo_compl.timed_out_completions.head;
+       while (next_index != -1) {
+               pending_packet = &tx->dqo.pending_packets[next_index];
+               next_index = pending_packet->next;
+               /* Break early because packets should time out in order. */
+               if (time_is_after_jiffies(pending_packet->timeout_jiffies))
+                       break;
+
+               remove_from_list(tx, &tx->dqo_compl.timed_out_completions,
+                                pending_packet);
+               gve_free_pending_packet(tx, pending_packet);
+       }
+}
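
add_to_list() and remove_from_list() are not part of this hunk; from the traversals above (s16 next_index, -1 terminator) they maintain these queues as linked lists of array indices into pending_packets. A rough, self-contained sketch of such a list, with hypothetical names:

#include <linux/types.h>

struct idx_node {
	s16 next;
	s16 prev;
};

struct idx_list {
	s16 head;
	s16 tail;
};

/* Append the node at array index 'idx' to the tail of the list; -1
 * marks both the empty list and the end of a chain.
 */
static void idx_list_add(struct idx_list *list, struct idx_node *nodes,
			 s16 idx)
{
	nodes[idx].next = -1;
	nodes[idx].prev = list->tail;
	if (list->tail == -1)
		list->head = idx;
	else
		nodes[list->tail].next = idx;
	list->tail = idx;
}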
+
+int gve_clean_tx_done_dqo(struct gve_priv *priv, struct gve_tx_ring *tx,
+                         struct napi_struct *napi)
+{
+       u64 reinject_compl_bytes = 0;
+       u64 reinject_compl_pkts = 0;
+       int num_descs_cleaned = 0;
+       u64 miss_compl_bytes = 0;
+       u64 miss_compl_pkts = 0;
+       u64 pkt_compl_bytes = 0;
+       u64 pkt_compl_pkts = 0;
+
+       /* Limit in order to avoid blocking for too long */
+       while (!napi || pkt_compl_pkts < napi->weight) {
+               struct gve_tx_compl_desc *compl_desc =
+                       &tx->dqo.compl_ring[tx->dqo_compl.head];
+               u16 type;
+
+               if (compl_desc->generation == tx->dqo_compl.cur_gen_bit)
+                       break;
+
+               /* Prefetch the next descriptor. */
+               prefetch(&tx->dqo.compl_ring[(tx->dqo_compl.head + 1) &
+                               tx->dqo.complq_mask]);
+
+               /* Do not read data until we own the descriptor */
+               dma_rmb();
+               type = compl_desc->type;
+
+               if (type == GVE_COMPL_TYPE_DQO_DESC) {
+                       /* This is the last descriptor fetched by HW plus one */
+                       u16 tx_head = le16_to_cpu(compl_desc->tx_head);
+
+                       atomic_set_release(&tx->dqo_compl.hw_tx_head, tx_head);
+               } else if (type == GVE_COMPL_TYPE_DQO_PKT) {
+                       u16 compl_tag = le16_to_cpu(compl_desc->completion_tag);
+
+                       gve_handle_packet_completion(priv, tx, !!napi,
+                                                    compl_tag,
+                                                    &pkt_compl_bytes,
+                                                    &pkt_compl_pkts,
+                                                    /*is_reinjection=*/false);
+               } else if (type == GVE_COMPL_TYPE_DQO_MISS) {
+                       u16 compl_tag = le16_to_cpu(compl_desc->completion_tag);
+
+                       gve_handle_miss_completion(priv, tx, compl_tag,
+                                                  &miss_compl_bytes,
+                                                  &miss_compl_pkts);
+               } else if (type == GVE_COMPL_TYPE_DQO_REINJECTION) {
+                       u16 compl_tag = le16_to_cpu(compl_desc->completion_tag);
+
+                       gve_handle_packet_completion(priv, tx, !!napi,
+                                                    compl_tag,
+                                                    &reinject_compl_bytes,
+                                                    &reinject_compl_pkts,
+                                                    /*is_reinjection=*/true);
+               }
+
+               tx->dqo_compl.head =
+                       (tx->dqo_compl.head + 1) & tx->dqo.complq_mask;
+               /* Flip the generation bit when we wrap around */
+               tx->dqo_compl.cur_gen_bit ^= tx->dqo_compl.head == 0;
+               num_descs_cleaned++;
+       }
+
+       netdev_tx_completed_queue(tx->netdev_txq,
+                                 pkt_compl_pkts + miss_compl_pkts,
+                                 pkt_compl_bytes + miss_compl_bytes);
+
+       remove_miss_completions(priv, tx);
+       remove_timed_out_completions(priv, tx);
+
+       u64_stats_update_begin(&tx->statss);
+       tx->bytes_done += pkt_compl_bytes + reinject_compl_bytes;
+       tx->pkt_done += pkt_compl_pkts + reinject_compl_pkts;
+       u64_stats_update_end(&tx->statss);
+       return num_descs_cleaned;
+}
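
The completion ring is consumed with a generation (phase) bit rather than a hardware head pointer: a descriptor belongs to the driver only while its generation bit differs from cur_gen_bit, and the expected bit is flipped each time head wraps to zero. A stripped-down sketch of the same loop, with hypothetical types (the real code also issues dma_rmb() before reading the payload):

#include <linux/types.h>

struct example_compl {
	u8 generation;
	/* ... payload ... */
};

static int example_consume(struct example_compl *ring, u32 mask,
			   u32 *head, u8 *cur_gen_bit)
{
	int n = 0;

	/* Descriptors written on the current lap carry the inverse of
	 * cur_gen_bit; a matching bit means we have caught up with HW.
	 */
	while (ring[*head].generation != *cur_gen_bit) {
		/* ... process ring[*head] ... */
		*head = (*head + 1) & mask;
		if (*head == 0)
			*cur_gen_bit ^= 1;	/* flip on wrap */
		n++;
	}
	return n;
}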
+
+bool gve_tx_poll_dqo(struct gve_notify_block *block, bool do_clean)
+{
+       struct gve_tx_compl_desc *compl_desc;
+       struct gve_tx_ring *tx = block->tx;
+       struct gve_priv *priv = block->priv;
+
+       if (do_clean) {
+               int num_descs_cleaned = gve_clean_tx_done_dqo(priv, tx,
+                                                             &block->napi);
+
+               /* Sync with queue being stopped in `gve_maybe_stop_tx_dqo()` */
+               mb();
+
+               if (netif_tx_queue_stopped(tx->netdev_txq) &&
+                   num_descs_cleaned > 0) {
+                       tx->wake_queue++;
+                       netif_tx_wake_queue(tx->netdev_txq);
+               }
+       }
+
+       /* Return true if we still have work. */
+       compl_desc = &tx->dqo.compl_ring[tx->dqo_compl.head];
+       return compl_desc->generation != tx->dqo_compl.cur_gen_bit;
+}
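
The mb() in gve_tx_poll_dqo() pairs with a barrier on the transmit side; gve_maybe_stop_tx_dqo() itself is not in this hunk, so the following is only a generic sketch of the canonical stop/re-check pattern it refers to (the free-descriptor accounting and all names are assumptions):

#include <linux/netdevice.h>

static int example_maybe_stop(struct netdev_queue *txq,
			      atomic_t *free_descs, int needed)
{
	if (likely(atomic_read(free_descs) >= needed))
		return 0;

	netif_tx_stop_queue(txq);

	/* Pairs with the cleaner's mb(): publish the stopped state
	 * before re-reading the completion count.
	 */
	smp_mb();

	/* Completions may have landed while we were stopping. */
	if (atomic_read(free_descs) < needed)
		return -EBUSY;

	netif_tx_start_queue(txq);
	return 0;
}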
diff --git a/drivers/net/ethernet/google/gve/gve_utils.c b/drivers/net/ethernet/google/gve/gve_utils.c
new file mode 100644 (file)
index 0000000..93f3dcb
--- /dev/null
@@ -0,0 +1,81 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/* Google virtual Ethernet (gve) driver
+ *
+ * Copyright (C) 2015-2021 Google, Inc.
+ */
+
+#include "gve.h"
+#include "gve_adminq.h"
+#include "gve_utils.h"
+
+void gve_tx_remove_from_block(struct gve_priv *priv, int queue_idx)
+{
+       struct gve_notify_block *block =
+                       &priv->ntfy_blocks[gve_tx_idx_to_ntfy(priv, queue_idx)];
+
+       block->tx = NULL;
+}
+
+void gve_tx_add_to_block(struct gve_priv *priv, int queue_idx)
+{
+       int ntfy_idx = gve_tx_idx_to_ntfy(priv, queue_idx);
+       struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
+       struct gve_tx_ring *tx = &priv->tx[queue_idx];
+
+       block->tx = tx;
+       tx->ntfy_id = ntfy_idx;
+}
+
+void gve_rx_remove_from_block(struct gve_priv *priv, int queue_idx)
+{
+       struct gve_notify_block *block =
+                       &priv->ntfy_blocks[gve_rx_idx_to_ntfy(priv, queue_idx)];
+
+       block->rx = NULL;
+}
+
+void gve_rx_add_to_block(struct gve_priv *priv, int queue_idx)
+{
+       u32 ntfy_idx = gve_rx_idx_to_ntfy(priv, queue_idx);
+       struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
+       struct gve_rx_ring *rx = &priv->rx[queue_idx];
+
+       block->rx = rx;
+       rx->ntfy_id = ntfy_idx;
+}
+
+struct sk_buff *gve_rx_copy(struct net_device *dev, struct napi_struct *napi,
+                           struct gve_rx_slot_page_info *page_info, u16 len,
+                           u16 pad)
+{
+       struct sk_buff *skb = napi_alloc_skb(napi, len);
+       void *va = page_info->page_address + pad +
+                  page_info->page_offset;
+
+       if (unlikely(!skb))
+               return NULL;
+
+       __skb_put(skb, len);
+
+       skb_copy_to_linear_data(skb, va, len);
+
+       skb->protocol = eth_type_trans(skb, dev);
+
+       return skb;
+}
+
+void gve_dec_pagecnt_bias(struct gve_rx_slot_page_info *page_info)
+{
+       page_info->pagecnt_bias--;
+       if (page_info->pagecnt_bias == 0) {
+               int pagecount = page_count(page_info->page);
+
+               /* If we have run out of bias - set it back up to INT_MAX
+                * minus the existing refs.
+                */
+               page_info->pagecnt_bias = INT_MAX - pagecount;
+
+               /* Set pagecount back up to max. */
+               page_ref_add(page_info->page, INT_MAX - pagecount);
+       }
+}
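
gve_dec_pagecnt_bias() is the consumer half of the pagecnt-bias trick: page references are handed out by decrementing a plain integer, and the atomic page refcount is only touched when the bias runs dry. A sketch of how such a bias is typically primed when the page is first set up (illustrative only, assuming the gve.h types above; this is not the driver's actual allocation path):

/* Take INT_MAX - 1 extra references up front (refcount: 1 -> INT_MAX)
 * so each later use only decrements page_info->pagecnt_bias.
 */
static void example_prime_pagecnt_bias(struct gve_rx_slot_page_info *page_info)
{
	page_ref_add(page_info->page, INT_MAX - 1);
	page_info->pagecnt_bias = INT_MAX;
}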
diff --git a/drivers/net/ethernet/google/gve/gve_utils.h b/drivers/net/ethernet/google/gve/gve_utils.h
new file mode 100644 (file)
index 0000000..7959594
--- /dev/null
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT)
+ * Google virtual Ethernet (gve) driver
+ *
+ * Copyright (C) 2015-2021 Google, Inc.
+ */
+
+#ifndef _GVE_UTILS_H
+#define _GVE_UTILS_H
+
+#include <linux/etherdevice.h>
+
+#include "gve.h"
+
+void gve_tx_remove_from_block(struct gve_priv *priv, int queue_idx);
+void gve_tx_add_to_block(struct gve_priv *priv, int queue_idx);
+
+void gve_rx_remove_from_block(struct gve_priv *priv, int queue_idx);
+void gve_rx_add_to_block(struct gve_priv *priv, int queue_idx);
+
+struct sk_buff *gve_rx_copy(struct net_device *dev, struct napi_struct *napi,
+                           struct gve_rx_slot_page_info *page_info, u16 len,
+                           u16 pad);
+
+/* Decrement pagecnt_bias. Set it back to INT_MAX if it reached zero. */
+void gve_dec_pagecnt_bias(struct gve_rx_slot_page_info *page_info);
+
+#endif /* _GVE_UTILS_H */
+
index bc67a7e..737ba85 100644 (file)
@@ -1285,36 +1285,41 @@ static void ibmveth_rx_csum_helper(struct sk_buff *skb,
                iph_proto = iph6->nexthdr;
        }
 
-       /* In OVS environment, when a flow is not cached, specifically for a
-        * new TCP connection, the first packet information is passed up
+       /* When CSO is enabled the TCP checksum may have been set to 0 by
+        * the sender, given that we zeroed out the TCP checksum field in
+        * the transmit path (see the ibmveth_start_xmit routine). In this
+        * case set up CHECKSUM_PARTIAL. If the packet is forwarded, the
+        * checksum will then be recalculated by the destination NIC (CSO
+        * must be enabled on the destination NIC).
+        *
+        * In an OVS environment, when a flow is not cached, specifically for a
+        * new TCP connection, the first packet information is passed up to
         * the user space for finding a flow. During this process, OVS computes
         * checksum on the first packet when CHECKSUM_PARTIAL flag is set.
         *
-        * Given that we zeroed out TCP checksum field in transmit path
-        * (refer ibmveth_start_xmit routine) as we set "no checksum bit",
-        * OVS computed checksum will be incorrect w/o TCP pseudo checksum
-        * in the packet. This leads to OVS dropping the packet and hence
-        * TCP retransmissions are seen.
-        *
-        * So, re-compute TCP pseudo header checksum.
+        * So, re-compute TCP pseudo header checksum when configured for
+        * trunk mode.
         */
-       if (iph_proto == IPPROTO_TCP && adapter->is_active_trunk) {
+       if (iph_proto == IPPROTO_TCP) {
                struct tcphdr *tcph = (struct tcphdr *)(skb->data + iphlen);
-
-               tcphdrlen = skb->len - iphlen;
-
-               /* Recompute TCP pseudo header checksum */
-               if (skb_proto == ETH_P_IP)
-                       tcph->check = ~csum_tcpudp_magic(iph->saddr,
+               if (tcph->check == 0x0000) {
+                       /* Recompute TCP pseudo header checksum */
+                       if (adapter->is_active_trunk) {
+                               tcphdrlen = skb->len - iphlen;
+                               if (skb_proto == ETH_P_IP)
+                                       tcph->check =
+                                        ~csum_tcpudp_magic(iph->saddr,
                                        iph->daddr, tcphdrlen, iph_proto, 0);
-               else if (skb_proto == ETH_P_IPV6)
-                       tcph->check = ~csum_ipv6_magic(&iph6->saddr,
+                               else if (skb_proto == ETH_P_IPV6)
+                                       tcph->check =
+                                        ~csum_ipv6_magic(&iph6->saddr,
                                        &iph6->daddr, tcphdrlen, iph_proto, 0);
-
-               /* Setup SKB fields for checksum offload */
-               skb_partial_csum_set(skb, iphlen,
-                                    offsetof(struct tcphdr, check));
-               skb_reset_network_header(skb);
+                       }
+                       /* Setup SKB fields for checksum offload */
+                       skb_partial_csum_set(skb, iphlen,
+                                            offsetof(struct tcphdr, check));
+                       skb_reset_network_header(skb);
+               }
        }
 }
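
For reference, the pseudo-header sum being folded into tcph->check above covers the source/destination addresses, the protocol and the TCP length; the IPv4 arm reduces to this helper-level sketch (helper name is hypothetical):

#include <linux/ip.h>
#include <linux/tcp.h>
#include <net/checksum.h>

/* Seed tcph->check with the IPv4 pseudo-header checksum so that a later
 * CHECKSUM_PARTIAL completion only has to sum the TCP segment itself.
 */
static void example_seed_tcp_pseudo_csum(struct iphdr *iph,
					 struct tcphdr *tcph,
					 unsigned int tcplen)
{
	tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
					 tcplen, IPPROTO_TCP, 0);
}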
 
index 857dc62..926cf74 100644 (file)
@@ -316,8 +316,10 @@ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked)
 
        /* Notify AUX drivers about impending change to TCs */
        event = kzalloc(sizeof(*event), GFP_KERNEL);
-       if (!event)
-               return -ENOMEM;
+       if (!event) {
+               ret = -ENOMEM;
+               goto free_cfg;
+       }
 
        set_bit(IIDC_EVENT_BEFORE_TC_CHANGE, event->type);
        ice_send_event_to_aux(pf, event);
index 6989a76..76021d9 100644 (file)
 #define GLGEN_CLKSTAT_SRC_PSM_CLK_SRC_S                4
 #define GLGEN_CLKSTAT_SRC_PSM_CLK_SRC_M                ICE_M(0x3, 4)
 #define GLGEN_CLKSTAT_SRC                      0x000B826C
+#define GLGEN_GPIO_CTL(_i)                     (0x000880C8 + ((_i) * 4))
+#define GLGEN_GPIO_CTL_PIN_DIR_M               BIT(4)
+#define GLGEN_GPIO_CTL_PIN_FUNC_S              8
+#define GLGEN_GPIO_CTL_PIN_FUNC_M              ICE_M(0xF, 8)
 #define GLGEN_RSTAT                            0x000B8188
 #define GLGEN_RSTAT_DEVSTATE_M                 ICE_M(0x3, 0)
 #define GLGEN_RSTCTL                           0x000B8180
 #define PFINT_MBX_CTL_CAUSE_ENA_M              BIT(30)
 #define PFINT_OICR                             0x0016CA00
 #define PFINT_OICR_TSYN_TX_M                   BIT(11)
+#define PFINT_OICR_TSYN_EVNT_M                 BIT(12)
 #define PFINT_OICR_ECC_ERR_M                   BIT(16)
 #define PFINT_OICR_MAL_DETECT_M                        BIT(19)
 #define PFINT_OICR_GRST_M                      BIT(20)
 #define GLV_UPRCL(_i)                          (0x003B2000 + ((_i) * 8))
 #define GLV_UPTCL(_i)                          (0x0030A000 + ((_i) * 8))
 #define PRTRPB_RDPC                            0x000AC260
+#define GLTSYN_AUX_IN_0(_i)                    (0x000889D8 + ((_i) * 4))
+#define GLTSYN_AUX_IN_0_INT_ENA_M              BIT(4)
+#define GLTSYN_AUX_OUT_0(_i)                   (0x00088998 + ((_i) * 4))
+#define GLTSYN_AUX_OUT_0_OUT_ENA_M             BIT(0)
+#define GLTSYN_AUX_OUT_0_OUTMOD_M              ICE_M(0x3, 1)
+#define GLTSYN_CLKO_0(_i)                      (0x000889B8 + ((_i) * 4))
 #define GLTSYN_CMD                             0x00088810
 #define GLTSYN_CMD_SYNC                                0x00088814
 #define GLTSYN_ENA(_i)                         (0x00088808 + ((_i) * 4))
 #define GLTSYN_ENA_TSYN_ENA_M                  BIT(0)
+#define GLTSYN_EVNT_H_0(_i)                    (0x00088970 + ((_i) * 4))
+#define GLTSYN_EVNT_L_0(_i)                    (0x00088968 + ((_i) * 4))
 #define GLTSYN_INCVAL_H(_i)                    (0x00088920 + ((_i) * 4))
 #define GLTSYN_INCVAL_L(_i)                    (0x00088918 + ((_i) * 4))
 #define GLTSYN_SHADJ_H(_i)                     (0x00088910 + ((_i) * 4))
 #define GLTSYN_SHTIME_H(_i)                    (0x000888F0 + ((_i) * 4))
 #define GLTSYN_SHTIME_L(_i)                    (0x000888E8 + ((_i) * 4))
 #define GLTSYN_STAT(_i)                                (0x000888C0 + ((_i) * 4))
+#define GLTSYN_STAT_EVENT0_M                   BIT(0)
+#define GLTSYN_STAT_EVENT1_M                   BIT(1)
+#define GLTSYN_STAT_EVENT2_M                   BIT(2)
 #define GLTSYN_SYNC_DLAY                       0x00088818
+#define GLTSYN_TGT_H_0(_i)                     (0x00088930 + ((_i) * 4))
+#define GLTSYN_TGT_L_0(_i)                     (0x00088928 + ((_i) * 4))
 #define GLTSYN_TIME_H(_i)                      (0x000888D8 + ((_i) * 4))
 #define GLTSYN_TIME_L(_i)                      (0x000888D0 + ((_i) * 4))
 #define PFTSYN_SEM                             0x00088880
index 5c3ea50..ef8d181 100644 (file)
 #include "ice_dcb_lib.h"
 #include "ice_dcb_nl.h"
 #include "ice_devlink.h"
+/* Including ice_trace.h with CREATE_TRACE_POINTS defined will generate the
+ * ice tracepoint functions. This must be done exactly once across the
+ * ice driver.
+ */
+#define CREATE_TRACE_POINTS
+#include "ice_trace.h"
 
 #define DRV_SUMMARY    "Intel(R) Ethernet Connection E800 Series Linux Driver"
 static const char ice_driver_string[] = DRV_SUMMARY;
@@ -2811,6 +2817,18 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
                ice_ptp_process_ts(pf);
        }
 
+       if (oicr & PFINT_OICR_TSYN_EVNT_M) {
+               u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
+               u32 gltsyn_stat = rd32(hw, GLTSYN_STAT(tmr_idx));
+
+               /* Save EVENTs from GTSYN register */
+               pf->ptp.ext_ts_irq |= gltsyn_stat & (GLTSYN_STAT_EVENT0_M |
+                                                    GLTSYN_STAT_EVENT1_M |
+                                                    GLTSYN_STAT_EVENT2_M);
+               ena_mask &= ~PFINT_OICR_TSYN_EVNT_M;
+               kthread_queue_work(pf->ptp.kworker, &pf->ptp.extts_work);
+       }
+
 #define ICE_AUX_CRIT_ERR (PFINT_OICR_PE_CRITERR_M | PFINT_OICR_HMC_ERR_M | PFINT_OICR_PE_PUSH_M)
        if (oicr & ICE_AUX_CRIT_ERR) {
                struct iidc_event *event;
@@ -5477,6 +5495,7 @@ static void ice_tx_dim_work(struct work_struct *work)
        itr = tx_profile[dim->profile_ix].itr;
        intrl = tx_profile[dim->profile_ix].intrl;
 
+       ice_trace(tx_dim_work, q_vector, dim);
        ice_write_itr(rc, itr);
        ice_write_intrl(q_vector, intrl);
 
@@ -5501,6 +5520,7 @@ static void ice_rx_dim_work(struct work_struct *work)
        itr = rx_profile[dim->profile_ix].itr;
        intrl = rx_profile[dim->profile_ix].intrl;
 
+       ice_trace(rx_dim_work, q_vector, dim);
        ice_write_itr(rc, itr);
        ice_write_intrl(q_vector, intrl);
 
index 609f433..5d5207b 100644 (file)
@@ -4,6 +4,8 @@
 #include "ice.h"
 #include "ice_lib.h"
 
+#define E810_OUT_PROP_DELAY_NS 1
+
 /**
  * ice_set_tx_tstamp - Enable or disable Tx timestamping
  * @pf: The PF pointer to search in
@@ -483,6 +485,255 @@ static int ice_ptp_adjfine(struct ptp_clock_info *info, long scaled_ppm)
        return 0;
 }
 
+/**
+ * ice_ptp_extts_work - Kthread work function for external timestamps
+ * @work: external timestamp work structure
+ *
+ * Service PTP external timestamp events
+ */
+static void ice_ptp_extts_work(struct kthread_work *work)
+{
+       struct ice_ptp *ptp = container_of(work, struct ice_ptp, extts_work);
+       struct ice_pf *pf = container_of(ptp, struct ice_pf, ptp);
+       struct ptp_clock_event event;
+       struct ice_hw *hw = &pf->hw;
+       u8 chan, tmr_idx;
+       u32 hi, lo;
+
+       tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
+       /* Event time is captured by one of the two matched registers
+        *      GLTSYN_EVNT_L: 32 LSB of sampled time event
+        *      GLTSYN_EVNT_H: 32 MSB of sampled time event
+        * Event is defined in GLTSYN_EVNT_0 register
+        */
+       for (chan = 0; chan < GLTSYN_EVNT_H_IDX_MAX; chan++) {
+               /* Check if channel is enabled */
+               if (pf->ptp.ext_ts_irq & (1 << chan)) {
+                       lo = rd32(hw, GLTSYN_EVNT_L(chan, tmr_idx));
+                       hi = rd32(hw, GLTSYN_EVNT_H(chan, tmr_idx));
+                       event.timestamp = (((u64)hi) << 32) | lo;
+                       event.type = PTP_CLOCK_EXTTS;
+                       event.index = chan;
+
+                       /* Fire event */
+                       ptp_clock_event(pf->ptp.clock, &event);
+                       pf->ptp.ext_ts_irq &= ~(1 << chan);
+               }
+       }
+}
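
ice_ptp_extts_work() runs on a kthread worker rather than the system workqueue (it is queued from ice_misc_intr() and initialized in ice_ptp_init()). The general wiring for such a work item looks roughly like this; all names below are hypothetical:

#include <linux/err.h>
#include <linux/kthread.h>

static struct kthread_worker *example_worker;
static struct kthread_work example_work;

static void example_work_fn(struct kthread_work *work)
{
	/* ... service the deferred events ... */
}

static int example_setup(void)
{
	example_worker = kthread_create_worker(0, "example");
	if (IS_ERR(example_worker))
		return PTR_ERR(example_worker);

	kthread_init_work(&example_work, example_work_fn);
	kthread_queue_work(example_worker, &example_work);
	return 0;
}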
+
+/**
+ * ice_ptp_cfg_extts - Configure EXTTS pin and channel
+ * @pf: Board private structure
+ * @ena: true to enable; false to disable
+ * @chan: GPIO channel (0-3)
+ * @gpio_pin: GPIO pin
+ * @extts_flags: request flags from the ptp_extts_request.flags
+ */
+static int
+ice_ptp_cfg_extts(struct ice_pf *pf, bool ena, unsigned int chan, u32 gpio_pin,
+                 unsigned int extts_flags)
+{
+       u32 func, aux_reg, gpio_reg, irq_reg;
+       struct ice_hw *hw = &pf->hw;
+       u8 tmr_idx;
+
+       if (chan > (unsigned int)pf->ptp.info.n_ext_ts)
+               return -EINVAL;
+
+       tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
+
+       irq_reg = rd32(hw, PFINT_OICR_ENA);
+
+       if (ena) {
+               /* Enable the interrupt */
+               irq_reg |= PFINT_OICR_TSYN_EVNT_M;
+               aux_reg = GLTSYN_AUX_IN_0_INT_ENA_M;
+
+#define GLTSYN_AUX_IN_0_EVNTLVL_RISING_EDGE    BIT(0)
+#define GLTSYN_AUX_IN_0_EVNTLVL_FALLING_EDGE   BIT(1)
+
+               /* set event level to requested edge */
+               if (extts_flags & PTP_FALLING_EDGE)
+                       aux_reg |= GLTSYN_AUX_IN_0_EVNTLVL_FALLING_EDGE;
+               if (extts_flags & PTP_RISING_EDGE)
+                       aux_reg |= GLTSYN_AUX_IN_0_EVNTLVL_RISING_EDGE;
+
+               /* Write the GPIO CTL register. Function 0x1 selects input
+                * sampled by the EVENT register for this channel, offset by
+                * num_in_channels * tmr_idx.
+                */
+               func = 1 + chan + (tmr_idx * 3);
+               gpio_reg = ((func << GLGEN_GPIO_CTL_PIN_FUNC_S) &
+                           GLGEN_GPIO_CTL_PIN_FUNC_M);
+               pf->ptp.ext_ts_chan |= (1 << chan);
+       } else {
+               /* clear the values we set to reset defaults */
+               aux_reg = 0;
+               gpio_reg = 0;
+               pf->ptp.ext_ts_chan &= ~(1 << chan);
+               if (!pf->ptp.ext_ts_chan)
+                       irq_reg &= ~PFINT_OICR_TSYN_EVNT_M;
+       }
+
+       wr32(hw, PFINT_OICR_ENA, irq_reg);
+       wr32(hw, GLTSYN_AUX_IN(chan, tmr_idx), aux_reg);
+       wr32(hw, GLGEN_GPIO_CTL(gpio_pin), gpio_reg);
+
+       return 0;
+}
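
From userspace this path is exercised through the PTP character device with the PTP_EXTTS_REQUEST ioctl; each captured edge is then delivered as a ptp_extts_event on read(). A hedged sketch (the /dev/ptp0 node and channel 0 are assumptions):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ptp_clock.h>

int main(void)
{
	struct ptp_extts_request req;
	struct ptp_extts_event ev;
	int fd = open("/dev/ptp0", O_RDWR);

	if (fd < 0)
		return 1;

	memset(&req, 0, sizeof(req));
	req.index = 0;	/* EXTTS channel 0 */
	req.flags = PTP_ENABLE_FEATURE | PTP_RISING_EDGE;
	if (ioctl(fd, PTP_EXTTS_REQUEST, &req))
		return 1;

	/* Block until the next rising edge is timestamped. */
	if (read(fd, &ev, sizeof(ev)) == sizeof(ev))
		printf("extts[%u] at %lld.%09u\n", ev.index,
		       (long long)ev.t.sec, ev.t.nsec);
	return 0;
}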
+
+/**
+ * ice_ptp_cfg_clkout - Configure clock to generate periodic wave
+ * @pf: Board private structure
+ * @chan: GPIO channel (0-3)
+ * @config: desired periodic clk configuration. NULL will disable channel
+ * @store: If set to true the values will be stored
+ *
+ * Configure the internal clock generator modules to generate the clock wave of
+ * specified period.
+ */
+static int ice_ptp_cfg_clkout(struct ice_pf *pf, unsigned int chan,
+                             struct ice_perout_channel *config, bool store)
+{
+       u64 current_time, period, start_time, phase;
+       struct ice_hw *hw = &pf->hw;
+       u32 func, val, gpio_pin;
+       u8 tmr_idx;
+
+       tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
+
+       /* 0. Reset mode & out_en in AUX_OUT */
+       wr32(hw, GLTSYN_AUX_OUT(chan, tmr_idx), 0);
+
+       /* If we're disabling the output, clear out CLKO and TGT and keep
+        * output level low
+        */
+       if (!config || !config->ena) {
+               wr32(hw, GLTSYN_CLKO(chan, tmr_idx), 0);
+               wr32(hw, GLTSYN_TGT_L(chan, tmr_idx), 0);
+               wr32(hw, GLTSYN_TGT_H(chan, tmr_idx), 0);
+
+               val = GLGEN_GPIO_CTL_PIN_DIR_M;
+               gpio_pin = pf->ptp.perout_channels[chan].gpio_pin;
+               wr32(hw, GLGEN_GPIO_CTL(gpio_pin), val);
+
+               /* Store the value if requested */
+               if (store)
+                       memset(&pf->ptp.perout_channels[chan], 0,
+                              sizeof(struct ice_perout_channel));
+
+               return 0;
+       }
+       period = config->period;
+       start_time = config->start_time;
+       div64_u64_rem(start_time, period, &phase);
+       gpio_pin = config->gpio_pin;
+
+       /* 1. Write clkout with half of required period value */
+       if (period & 0x1) {
+               dev_err(ice_pf_to_dev(pf), "CLK Period must be an even value\n");
+               goto err;
+       }
+
+       period >>= 1;
+
+       /* For proper operation, GLTSYN_CLKO must be larger than one clock
+        * tick.
+        */
+#define MIN_PULSE 3
+       if (period <= MIN_PULSE || period > U32_MAX) {
+               dev_err(ice_pf_to_dev(pf), "CLK Period must be > %d && < 2^33",
+                       MIN_PULSE * 2);
+               goto err;
+       }
+
+       wr32(hw, GLTSYN_CLKO(chan, tmr_idx), lower_32_bits(period));
+
+       /* Allow time for programming before start_time is hit */
+       current_time = ice_ptp_read_src_clk_reg(pf, NULL);
+
+       /* If the start time is in the past, start the timer at the nearest
+        * whole second, maintaining the requested phase.
+        */
+       if (start_time < current_time)
+               start_time = div64_u64(current_time + NSEC_PER_MSEC - 1,
+                                      NSEC_PER_SEC) * NSEC_PER_SEC + phase;
+
+       start_time -= E810_OUT_PROP_DELAY_NS;
+
+       /* 2. Write TARGET time */
+       wr32(hw, GLTSYN_TGT_L(chan, tmr_idx), lower_32_bits(start_time));
+       wr32(hw, GLTSYN_TGT_H(chan, tmr_idx), upper_32_bits(start_time));
+
+       /* 3. Write AUX_OUT register */
+       val = GLTSYN_AUX_OUT_0_OUT_ENA_M | GLTSYN_AUX_OUT_0_OUTMOD_M;
+       wr32(hw, GLTSYN_AUX_OUT(chan, tmr_idx), val);
+
+       /* 4. write GPIO CTL reg */
+       func = 8 + chan + (tmr_idx * 4);
+       val = GLGEN_GPIO_CTL_PIN_DIR_M |
+             ((func << GLGEN_GPIO_CTL_PIN_FUNC_S) & GLGEN_GPIO_CTL_PIN_FUNC_M);
+       wr32(hw, GLGEN_GPIO_CTL(gpio_pin), val);
+
+       /* Store the value if requested */
+       if (store) {
+               memcpy(&pf->ptp.perout_channels[chan], config,
+                      sizeof(struct ice_perout_channel));
+               pf->ptp.perout_channels[chan].start_time = phase;
+       }
+
+       return 0;
+err:
+       dev_err(ice_pf_to_dev(pf), "PTP failed to cfg per_clk\n");
+       return -EFAULT;
+}
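
The matching userspace request for a periodic output goes through the PTP_PEROUT_REQUEST ioctl; a 1 Hz output satisfies the even-period check above (10^9 ns is even). A sketch under the same assumptions as before (channel 0, an already-open PTP clock fd):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/ptp_clock.h>

/* Request a 1 Hz periodic output on channel 0 starting at start_sec. */
static int example_enable_perout(int ptp_fd, long long start_sec)
{
	struct ptp_perout_request req;

	memset(&req, 0, sizeof(req));
	req.index = 0;
	req.start.sec = start_sec;
	req.start.nsec = 0;
	req.period.sec = 1;
	req.period.nsec = 0;
	return ioctl(ptp_fd, PTP_PEROUT_REQUEST, &req);
}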
+
+/**
+ * ice_ptp_gpio_enable_e810 - Enable/disable ancillary features of PHC
+ * @info: the driver's PTP info structure
+ * @rq: The requested feature to change
+ * @on: Enable/disable flag
+ */
+static int
+ice_ptp_gpio_enable_e810(struct ptp_clock_info *info,
+                        struct ptp_clock_request *rq, int on)
+{
+       struct ice_pf *pf = ptp_info_to_pf(info);
+       struct ice_perout_channel clk_cfg = {0};
+       unsigned int chan;
+       u32 gpio_pin;
+       int err;
+
+       switch (rq->type) {
+       case PTP_CLK_REQ_PEROUT:
+               chan = rq->perout.index;
+               if (chan == PPS_CLK_GEN_CHAN)
+                       clk_cfg.gpio_pin = PPS_PIN_INDEX;
+               else
+                       clk_cfg.gpio_pin = chan;
+
+               clk_cfg.period = ((rq->perout.period.sec * NSEC_PER_SEC) +
+                                  rq->perout.period.nsec);
+               clk_cfg.start_time = ((rq->perout.start.sec * NSEC_PER_SEC) +
+                                      rq->perout.start.nsec);
+               clk_cfg.ena = !!on;
+
+               err = ice_ptp_cfg_clkout(pf, chan, &clk_cfg, true);
+               break;
+       case PTP_CLK_REQ_EXTTS:
+               chan = rq->extts.index;
+               gpio_pin = chan;
+
+               err = ice_ptp_cfg_extts(pf, !!on, chan, gpio_pin,
+                                       rq->extts.flags);
+               break;
+       default:
+               return -EOPNOTSUPP;
+       }
+
+       return err;
+}
+
 /**
  * ice_ptp_gettimex64 - Get the time of the clock
  * @info: the driver's PTP info structure
@@ -740,6 +991,34 @@ ice_ptp_rx_hwtstamp(struct ice_ring *rx_ring,
        }
 }
 
+/**
+ * ice_ptp_setup_pins_e810 - Setup PTP pins in sysfs
+ * @info: PTP clock capabilities
+ */
+static void ice_ptp_setup_pins_e810(struct ptp_clock_info *info)
+{
+       info->n_per_out = E810_N_PER_OUT;
+       info->n_ext_ts = E810_N_EXT_TS;
+}
+
+/**
+ * ice_ptp_set_funcs_e810 - Set specialized functions for E810 support
+ * @pf: Board private structure
+ * @info: PTP info to fill
+ *
+ * Assign functions to the PTP capabilities structure for E810 devices.
+ * Functions which operate across all device families should be set directly
+ * in ice_ptp_set_caps. Only add functions here which are distinct for E810
+ * devices.
+ */
+static void
+ice_ptp_set_funcs_e810(struct ice_pf *pf, struct ptp_clock_info *info)
+{
+       info->enable = ice_ptp_gpio_enable_e810;
+
+       ice_ptp_setup_pins_e810(info);
+}
+
 /**
  * ice_ptp_set_caps - Set PTP capabilities
  * @pf: Board private structure
@@ -757,6 +1036,8 @@ static void ice_ptp_set_caps(struct ice_pf *pf)
        info->adjfine = ice_ptp_adjfine;
        info->gettimex64 = ice_ptp_gettimex64;
        info->settime64 = ice_ptp_settime64;
+
+       ice_ptp_set_funcs_e810(pf, info);
 }
 
 /**
@@ -783,6 +1064,17 @@ static long ice_ptp_create_clock(struct ice_pf *pf)
        info = &pf->ptp.info;
        dev = ice_pf_to_dev(pf);
 
+       /* Allocate memory for kernel pins interface */
+       if (info->n_pins) {
+               info->pin_config = devm_kcalloc(dev, info->n_pins,
+                                               sizeof(*info->pin_config),
+                                               GFP_KERNEL);
+               if (!info->pin_config) {
+                       info->n_pins = 0;
+                       return -ENOMEM;
+               }
+       }
+
        /* Attempt to register the clock before enabling the hardware. */
        clock = ptp_clock_register(info, dev);
        if (IS_ERR(clock))
@@ -1203,6 +1495,7 @@ void ice_ptp_init(struct ice_pf *pf)
 
        /* Initialize work functions */
        kthread_init_delayed_work(&pf->ptp.work, ice_ptp_periodic_work);
+       kthread_init_work(&pf->ptp.extts_work, ice_ptp_extts_work);
 
        /* Allocate a kworker for handling work required for the ports
         * connected to the PTP hardware clock.
index d01507e..e1c787b 100644 (file)
@@ -9,6 +9,21 @@
 
 #include "ice_ptp_hw.h"
 
+enum ice_ptp_pin {
+       GPIO_20 = 0,
+       GPIO_21,
+       GPIO_22,
+       GPIO_23,
+       NUM_ICE_PTP_PIN
+};
+
+struct ice_perout_channel {
+       bool ena;
+       u32 gpio_pin;
+       u64 period;
+       u64 start_time;
+};
+
 /* The ice hardware captures Tx hardware timestamps in the PHY. The timestamp
  * is stored in a buffer of registers. Depending on the specific hardware,
  * this buffer might be shared across multiple PHY ports.
@@ -82,12 +97,18 @@ struct ice_ptp_port {
        struct ice_ptp_tx tx;
 };
 
+#define GLTSYN_TGT_H_IDX_MAX           4
+
 /**
  * struct ice_ptp - data used for integrating with CONFIG_PTP_1588_CLOCK
  * @port: data for the PHY port initialization procedure
  * @work: delayed work function for periodic tasks
+ * @extts_work: work function for handling external timestamp events
  * @cached_phc_time: a cached copy of the PHC time for timestamp extension
+ * @ext_ts_chan: the external timestamp channel in use
+ * @ext_ts_irq: the external timestamp IRQ in use
  * @kworker: kwork thread for handling periodic work
+ * @perout_channels: periodic output data
  * @info: structure defining PTP hardware capabilities
  * @clock: pointer to registered PTP clock device
  * @tstamp_config: hardware timestamping configuration
@@ -95,8 +116,12 @@ struct ice_ptp_port {
 struct ice_ptp {
        struct ice_ptp_port port;
        struct kthread_delayed_work work;
+       struct kthread_work extts_work;
        u64 cached_phc_time;
+       u8 ext_ts_chan;
+       u8 ext_ts_irq;
        struct kthread_worker *kworker;
+       struct ice_perout_channel perout_channels[GLTSYN_TGT_H_IDX_MAX];
        struct ptp_clock_info info;
        struct ptp_clock *clock;
        struct hwtstamp_config tstamp_config;
@@ -115,6 +140,24 @@ struct ice_ptp {
 #define PTP_SHARED_CLK_IDX_VALID       BIT(31)
 #define ICE_PTP_TS_VALID               BIT(0)
 
+/* Per-channel register definitions */
+#define GLTSYN_AUX_OUT(_chan, _idx)    (GLTSYN_AUX_OUT_0(_idx) + ((_chan) * 8))
+#define GLTSYN_AUX_IN(_chan, _idx)     (GLTSYN_AUX_IN_0(_idx) + ((_chan) * 8))
+#define GLTSYN_CLKO(_chan, _idx)       (GLTSYN_CLKO_0(_idx) + ((_chan) * 8))
+#define GLTSYN_TGT_L(_chan, _idx)      (GLTSYN_TGT_L_0(_idx) + ((_chan) * 16))
+#define GLTSYN_TGT_H(_chan, _idx)      (GLTSYN_TGT_H_0(_idx) + ((_chan) * 16))
+#define GLTSYN_EVNT_L(_chan, _idx)     (GLTSYN_EVNT_L_0(_idx) + ((_chan) * 16))
+#define GLTSYN_EVNT_H(_chan, _idx)     (GLTSYN_EVNT_H_0(_idx) + ((_chan) * 16))
+#define GLTSYN_EVNT_H_IDX_MAX          3
+
+/* Pin definitions for PTP PPS out */
+#define PPS_CLK_GEN_CHAN               3
+#define PPS_CLK_SRC_CHAN               2
+#define PPS_PIN_INDEX                  5
+#define TIME_SYNC_PIN_INDEX            4
+#define E810_N_EXT_TS                  3
+#define E810_N_PER_OUT                 4
+
 #if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
 struct ice_pf;
 int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr);
index a17e24e..9f07b66 100644 (file)
@@ -2745,8 +2745,8 @@ static enum ice_status
 ice_sched_assoc_vsi_to_agg(struct ice_port_info *pi, u32 agg_id,
                           u16 vsi_handle, unsigned long *tc_bitmap)
 {
-       struct ice_sched_agg_vsi_info *agg_vsi_info;
-       struct ice_sched_agg_info *agg_info;
+       struct ice_sched_agg_vsi_info *agg_vsi_info, *old_agg_vsi_info = NULL;
+       struct ice_sched_agg_info *agg_info, *old_agg_info;
        enum ice_status status = 0;
        struct ice_hw *hw = pi->hw;
        u8 tc;
@@ -2756,6 +2756,20 @@ ice_sched_assoc_vsi_to_agg(struct ice_port_info *pi, u32 agg_id,
        agg_info = ice_get_agg_info(hw, agg_id);
        if (!agg_info)
                return ICE_ERR_PARAM;
+       /* If the VSI is already part of another aggregator then update
+        * its VSI info list
+        */
+       old_agg_info = ice_get_vsi_agg_info(hw, vsi_handle);
+       if (old_agg_info && old_agg_info != agg_info) {
+               struct ice_sched_agg_vsi_info *vtmp;
+
+               list_for_each_entry_safe(old_agg_vsi_info, vtmp,
+                                        &old_agg_info->agg_vsi_list,
+                                        list_entry)
+                       if (old_agg_vsi_info->vsi_handle == vsi_handle)
+                               break;
+       }
+
        /* check if entry already exist */
        agg_vsi_info = ice_get_agg_vsi_info(agg_info, vsi_handle);
        if (!agg_vsi_info) {
@@ -2780,6 +2794,12 @@ ice_sched_assoc_vsi_to_agg(struct ice_port_info *pi, u32 agg_id,
                        break;
 
                set_bit(tc, agg_vsi_info->tc_bitmap);
+               if (old_agg_vsi_info)
+                       clear_bit(tc, old_agg_vsi_info->tc_bitmap);
+       }
+       if (old_agg_vsi_info && !old_agg_vsi_info->tc_bitmap[0]) {
+               list_del(&old_agg_vsi_info->list_entry);
+               devm_kfree(ice_hw_to_dev(pi->hw), old_agg_vsi_info);
        }
        return status;
 }
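
The hunk above relies on the safe-iteration idiom: list_for_each_entry_safe() caches the next node so the current entry can be unlinked (and freed) mid-walk. A generic sketch of the same idiom, with hypothetical types:

#include <linux/list.h>

struct example_item {
	int key;
	struct list_head list_entry;
};

/* Remove and return the first entry matching 'key', or NULL. */
static struct example_item *example_pop_matching(struct list_head *head,
						 int key)
{
	struct example_item *it, *tmp;

	list_for_each_entry_safe(it, tmp, head, list_entry) {
		if (it->key == key) {
			list_del(&it->list_entry);
			return it;
		}
	}
	return NULL;
}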
diff --git a/drivers/net/ethernet/intel/ice/ice_trace.h b/drivers/net/ethernet/intel/ice/ice_trace.h
new file mode 100644 (file)
index 0000000..9bc0b8f
--- /dev/null
@@ -0,0 +1,232 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2021 Intel Corporation. */
+
+/* Modeled on trace-events-sample.h */
+
+/* The trace subsystem name for ice will be "ice".
+ *
+ * This file is named ice_trace.h.
+ *
+ * Since this include file's name is different from the trace
+ * subsystem name, we'll have to define TRACE_INCLUDE_FILE at the end
+ * of this file.
+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM ice
+
+/* See trace-events-sample.h for a detailed description of why this
+ * guard clause is different from most normal include files.
+ */
+#if !defined(_ICE_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _ICE_TRACE_H_
+
+#include <linux/tracepoint.h>
+
+/* ice_trace() macro enables shared code to refer to trace points
+ * like:
+ *
+ * trace_ice_example(args...)
+ *
+ * ... as:
+ *
+ * ice_trace(example, args...)
+ *
+ * ... to resolve to the PF version of the tracepoint without
+ * ifdefs, and to allow tracepoints to be disabled entirely at build
+ * time.
+ *
+ * Trace point should always be referred to in the driver via this
+ * macro.
+ *
+ * Similarly, ice_trace_enabled(trace_name) wraps references to
+ * trace_ice_<trace_name>_enabled() functions.
+ * @trace_name: name of tracepoint
+ */
+#define _ICE_TRACE_NAME(trace_name) (trace_##ice##_##trace_name)
+#define ICE_TRACE_NAME(trace_name) _ICE_TRACE_NAME(trace_name)
+
+#define ice_trace(trace_name, args...) ICE_TRACE_NAME(trace_name)(args)
+
+#define ice_trace_enabled(trace_name) ICE_TRACE_NAME(trace_name##_enabled)()
+
+/* These events are common to the PF. The generated functions are named
+ * trace_ice_*; the ice_trace() macro above selects the correct tracepoint
+ * name for the driver.
+ */
+
+/* Begin tracepoints */
+
+/* Global tracepoints */
+
+/* Events related to DIM, q_vectors and ring containers */
+DECLARE_EVENT_CLASS(ice_rx_dim_template,
+                   TP_PROTO(struct ice_q_vector *q_vector, struct dim *dim),
+                   TP_ARGS(q_vector, dim),
+                   TP_STRUCT__entry(__field(struct ice_q_vector *, q_vector)
+                                    __field(struct dim *, dim)
+                                    __string(devname, q_vector->rx.ring->netdev->name)),
+
+                   TP_fast_assign(__entry->q_vector = q_vector;
+                                  __entry->dim = dim;
+                                  __assign_str(devname, q_vector->rx.ring->netdev->name);),
+
+                   TP_printk("netdev: %s Rx-Q: %d dim-state: %d dim-profile: %d dim-tune: %d dim-st-right: %d dim-st-left: %d dim-tired: %d",
+                             __get_str(devname),
+                             __entry->q_vector->rx.ring->q_index,
+                             __entry->dim->state,
+                             __entry->dim->profile_ix,
+                             __entry->dim->tune_state,
+                             __entry->dim->steps_right,
+                             __entry->dim->steps_left,
+                             __entry->dim->tired)
+);
+
+DEFINE_EVENT(ice_rx_dim_template, ice_rx_dim_work,
+            TP_PROTO(struct ice_q_vector *q_vector, struct dim *dim),
+            TP_ARGS(q_vector, dim)
+);
+
+DECLARE_EVENT_CLASS(ice_tx_dim_template,
+                   TP_PROTO(struct ice_q_vector *q_vector, struct dim *dim),
+                   TP_ARGS(q_vector, dim),
+                   TP_STRUCT__entry(__field(struct ice_q_vector *, q_vector)
+                                    __field(struct dim *, dim)
+                                    __string(devname, q_vector->tx.ring->netdev->name)),
+
+                   TP_fast_assign(__entry->q_vector = q_vector;
+                                  __entry->dim = dim;
+                                  __assign_str(devname, q_vector->tx.ring->netdev->name);),
+
+                   TP_printk("netdev: %s Tx-Q: %d dim-state: %d dim-profile: %d dim-tune: %d dim-st-right: %d dim-st-left: %d dim-tired: %d",
+                             __get_str(devname),
+                             __entry->q_vector->tx.ring->q_index,
+                             __entry->dim->state,
+                             __entry->dim->profile_ix,
+                             __entry->dim->tune_state,
+                             __entry->dim->steps_right,
+                             __entry->dim->steps_left,
+                             __entry->dim->tired)
+);
+
+DEFINE_EVENT(ice_tx_dim_template, ice_tx_dim_work,
+            TP_PROTO(struct ice_q_vector *q_vector, struct dim *dim),
+            TP_ARGS(q_vector, dim)
+);
+
+/* Events related to a vsi & ring */
+DECLARE_EVENT_CLASS(ice_tx_template,
+                   TP_PROTO(struct ice_ring *ring, struct ice_tx_desc *desc,
+                            struct ice_tx_buf *buf),
+
+                   TP_ARGS(ring, desc, buf),
+                   TP_STRUCT__entry(__field(void *, ring)
+                                    __field(void *, desc)
+                                    __field(void *, buf)
+                                    __string(devname, ring->netdev->name)),
+
+                   TP_fast_assign(__entry->ring = ring;
+                                  __entry->desc = desc;
+                                  __entry->buf = buf;
+                                  __assign_str(devname, ring->netdev->name);),
+
+                   TP_printk("netdev: %s ring: %pK desc: %pK buf %pK", __get_str(devname),
+                             __entry->ring, __entry->desc, __entry->buf)
+);
+
+#define DEFINE_TX_TEMPLATE_OP_EVENT(name) \
+DEFINE_EVENT(ice_tx_template, name, \
+            TP_PROTO(struct ice_ring *ring, \
+                     struct ice_tx_desc *desc, \
+                     struct ice_tx_buf *buf), \
+            TP_ARGS(ring, desc, buf))
+
+DEFINE_TX_TEMPLATE_OP_EVENT(ice_clean_tx_irq);
+DEFINE_TX_TEMPLATE_OP_EVENT(ice_clean_tx_irq_unmap);
+DEFINE_TX_TEMPLATE_OP_EVENT(ice_clean_tx_irq_unmap_eop);
+
+DECLARE_EVENT_CLASS(ice_rx_template,
+                   TP_PROTO(struct ice_ring *ring, union ice_32b_rx_flex_desc *desc),
+
+                   TP_ARGS(ring, desc),
+
+                   TP_STRUCT__entry(__field(void *, ring)
+                                    __field(void *, desc)
+                                    __string(devname, ring->netdev->name)),
+
+                   TP_fast_assign(__entry->ring = ring;
+                                  __entry->desc = desc;
+                                  __assign_str(devname, ring->netdev->name);),
+
+                   TP_printk("netdev: %s ring: %pK desc: %pK", __get_str(devname),
+                             __entry->ring, __entry->desc)
+);
+DEFINE_EVENT(ice_rx_template, ice_clean_rx_irq,
+            TP_PROTO(struct ice_ring *ring, union ice_32b_rx_flex_desc *desc),
+            TP_ARGS(ring, desc)
+);
+
+DECLARE_EVENT_CLASS(ice_rx_indicate_template,
+                   TP_PROTO(struct ice_ring *ring, union ice_32b_rx_flex_desc *desc,
+                            struct sk_buff *skb),
+
+                   TP_ARGS(ring, desc, skb),
+
+                   TP_STRUCT__entry(__field(void *, ring)
+                                    __field(void *, desc)
+                                    __field(void *, skb)
+                                    __string(devname, ring->netdev->name)),
+
+                   TP_fast_assign(__entry->ring = ring;
+                                  __entry->desc = desc;
+                                  __entry->skb = skb;
+                                  __assign_str(devname, ring->netdev->name);),
+
+                   TP_printk("netdev: %s ring: %pK desc: %pK skb %pK", __get_str(devname),
+                             __entry->ring, __entry->desc, __entry->skb)
+);
+
+DEFINE_EVENT(ice_rx_indicate_template, ice_clean_rx_irq_indicate,
+            TP_PROTO(struct ice_ring *ring, union ice_32b_rx_flex_desc *desc,
+                     struct sk_buff *skb),
+            TP_ARGS(ring, desc, skb)
+);
+
+DECLARE_EVENT_CLASS(ice_xmit_template,
+                   TP_PROTO(struct ice_ring *ring, struct sk_buff *skb),
+
+                   TP_ARGS(ring, skb),
+
+                   TP_STRUCT__entry(__field(void *, ring)
+                                    __field(void *, skb)
+                                    __string(devname, ring->netdev->name)),
+
+                   TP_fast_assign(__entry->ring = ring;
+                                  __entry->skb = skb;
+                                  __assign_str(devname, ring->netdev->name);),
+
+                   TP_printk("netdev: %s skb: %pK ring: %pK", __get_str(devname),
+                             __entry->skb, __entry->ring)
+);
+
+#define DEFINE_XMIT_TEMPLATE_OP_EVENT(name) \
+DEFINE_EVENT(ice_xmit_template, name, \
+            TP_PROTO(struct ice_ring *ring, struct sk_buff *skb), \
+            TP_ARGS(ring, skb))
+
+DEFINE_XMIT_TEMPLATE_OP_EVENT(ice_xmit_frame_ring);
+DEFINE_XMIT_TEMPLATE_OP_EVENT(ice_xmit_frame_ring_drop);
+
+/* End tracepoints */
+
+#endif /* _ICE_TRACE_H_ */
+/* This must be outside ifdef _ICE_TRACE_H */
+
+/* This trace include file is not located in the .../include/trace
+ * with the kernel tracepoint definitions, because we're a loadable
+ * module.
+ */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE ../../drivers/net/ethernet/intel/ice/ice_trace
+#include <trace/define_trace.h>
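
Call sites consume this header through the two wrapper macros defined above; a minimal sketch (the surrounding function is hypothetical, the tracepoint name comes from this file):

#include "ice_trace.h"

static void example_xmit(struct ice_ring *ring, struct sk_buff *skb)
{
	/* Expands to trace_ice_xmit_frame_ring(ring, skb). */
	ice_trace(xmit_frame_ring, ring, skb);

	/* Static-key guarded test, for work that is only needed when the
	 * tracepoint is enabled.
	 */
	if (ice_trace_enabled(xmit_frame_ring))
		; /* e.g. gather extra debug state here */
}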
index e9e9edb..a63d591 100644 (file)
@@ -10,6 +10,7 @@
 #include "ice_txrx_lib.h"
 #include "ice_lib.h"
 #include "ice.h"
+#include "ice_trace.h"
 #include "ice_dcb_lib.h"
 #include "ice_xsk.h"
 
@@ -224,6 +225,7 @@ static bool ice_clean_tx_irq(struct ice_ring *tx_ring, int napi_budget)
 
                smp_rmb();      /* prevent any other reads prior to eop_desc */
 
+               ice_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
                /* if the descriptor isn't done, no work yet to do */
                if (!(eop_desc->cmd_type_offset_bsz &
                      cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
@@ -254,6 +256,7 @@ static bool ice_clean_tx_irq(struct ice_ring *tx_ring, int napi_budget)
 
                /* unmap remaining buffers */
                while (tx_desc != eop_desc) {
+                       ice_trace(clean_tx_irq_unmap, tx_ring, tx_desc, tx_buf);
                        tx_buf++;
                        tx_desc++;
                        i++;
@@ -272,6 +275,7 @@ static bool ice_clean_tx_irq(struct ice_ring *tx_ring, int napi_budget)
                                dma_unmap_len_set(tx_buf, len, 0);
                        }
                }
+               ice_trace(clean_tx_irq_unmap_eop, tx_ring, tx_desc, tx_buf);
 
                /* move us one more past the eop_desc for start of next pkt */
                tx_buf++;
@@ -1102,6 +1106,7 @@ int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
                 */
                dma_rmb();
 
+               ice_trace(clean_rx_irq, rx_ring, rx_desc);
                if (rx_desc->wb.rxdid == FDIR_DESC_RXDID || !rx_ring->netdev) {
                        struct ice_vsi *ctrl_vsi = rx_ring->vsi;
 
@@ -1207,6 +1212,7 @@ construct_skb:
 
                ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
 
+               ice_trace(clean_rx_irq_indicate, rx_ring, rx_desc, skb);
                /* send completed skb up the stack */
                ice_receive_skb(rx_ring, skb, vlan_tag);
                skb = NULL;
@@ -2188,6 +2194,8 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
        unsigned int count;
        int tso, csum;
 
+       ice_trace(xmit_frame_ring, tx_ring, skb);
+
        count = ice_xmit_desc_count(skb);
        if (ice_chk_linearize(skb, count)) {
                if (__skb_linearize(skb))
@@ -2262,6 +2270,7 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
        return NETDEV_TX_OK;
 
 out_drop:
+       ice_trace(xmit_frame_ring_drop, tx_ring, skb);
        dev_kfree_skb_any(skb);
        return NETDEV_TX_OK;
 }
index 6392e0b..2826570 100644 (file)
@@ -1689,7 +1689,6 @@ bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
                else
                        promisc_m = ICE_UCAST_PROMISC_BITS;
 
-               vsi = ice_get_vf_vsi(vf);
                if (ice_vf_set_vsi_promisc(vf, vsi, promisc_m, true))
                        dev_err(dev, "disabling promiscuous mode failed\n");
        }
index 7537ee3..62a97c4 100644 (file)
@@ -18,9 +18,9 @@
  */
 
 #include <linux/acpi.h>
+#include <linux/acpi_mdio.h>
 #include <linux/clk.h>
 #include <linux/delay.h>
-#include <linux/fwnode_mdio.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
 #include <linux/kernel.h>
@@ -371,7 +371,13 @@ static int orion_mdio_probe(struct platform_device *pdev)
                goto out_mdio;
        }
 
-       ret = fwnode_mdiobus_register(bus, pdev->dev.fwnode);
+       /* For platforms without a DT or ACPI node, of_mdiobus_register()
+        * falls back to plain mdiobus_register().
+        */
+       if (is_acpi_node(pdev->dev.fwnode))
+               ret = acpi_mdiobus_register(bus, pdev->dev.fwnode);
+       else
+               ret = of_mdiobus_register(bus, pdev->dev.of_node);
        if (ret < 0) {
                dev_err(&pdev->dev, "Cannot register MDIO bus (%d)\n", ret);
                goto out_mdio;
index ff6613a..b4f66eb 100644 (file)
@@ -22,5 +22,6 @@ source "drivers/net/ethernet/mellanox/mlx4/Kconfig"
 source "drivers/net/ethernet/mellanox/mlx5/core/Kconfig"
 source "drivers/net/ethernet/mellanox/mlxsw/Kconfig"
 source "drivers/net/ethernet/mellanox/mlxfw/Kconfig"
+source "drivers/net/ethernet/mellanox/mlxbf_gige/Kconfig"
 
 endif # NET_VENDOR_MELLANOX
index 79773ac..d4b5f54 100644 (file)
@@ -7,3 +7,4 @@ obj-$(CONFIG_MLX4_CORE) += mlx4/
 obj-$(CONFIG_MLX5_CORE) += mlx5/core/
 obj-$(CONFIG_MLXSW_CORE) += mlxsw/
 obj-$(CONFIG_MLXFW) += mlxfw/
+obj-$(CONFIG_MLXBF_GIGE) += mlxbf_gige/
index d62f90a..e1a5a79 100644 (file)
@@ -12,7 +12,6 @@ config MLX5_CORE
        depends on MLXFW || !MLXFW
        depends on PTP_1588_CLOCK || !PTP_1588_CLOCK
        depends on PCI_HYPERV_INTERFACE || !PCI_HYPERV_INTERFACE
-       default n
        help
          Core driver for low level functionality of the ConnectX-4 and
          Connect-IB cards by Mellanox Technologies.
@@ -36,7 +35,6 @@ config MLX5_CORE_EN
        depends on NETDEVICES && ETHERNET && INET && PCI && MLX5_CORE
        select PAGE_POOL
        select DIMLIB
-       default n
        help
          Ethernet support in Mellanox Technologies ConnectX-4 NIC.
 
@@ -141,7 +139,6 @@ config MLX5_CORE_EN_DCB
 config MLX5_CORE_IPOIB
        bool "Mellanox 5th generation network adapters (connectX series) IPoIB offloads support"
        depends on MLX5_CORE_EN
-       default n
        help
          MLX5 IPoIB offloads & acceleration support.
 
@@ -149,7 +146,6 @@ config MLX5_FPGA_IPSEC
        bool "Mellanox Technologies IPsec Innova support"
        depends on MLX5_CORE
        depends on MLX5_FPGA
-       default n
        help
        Build IPsec support for the Innova family of network cards by Mellanox
        Technologies. Innova network cards are comprised of a ConnectX chip
@@ -163,7 +159,6 @@ config MLX5_IPSEC
        depends on XFRM_OFFLOAD
        depends on INET_ESP_OFFLOAD || INET6_ESP_OFFLOAD
        select MLX5_ACCEL
-       default n
        help
        Build IPsec support for the Connect-X family of network cards by Mellanox
        Technologies.
@@ -176,7 +171,6 @@ config MLX5_EN_IPSEC
        depends on XFRM_OFFLOAD
        depends on INET_ESP_OFFLOAD || INET6_ESP_OFFLOAD
        depends on MLX5_FPGA_IPSEC || MLX5_IPSEC
-       default n
        help
          Build support for IPsec cryptography-offload acceleration in the NIC.
          Note: Support for hardware with this capability needs to be selected
@@ -189,7 +183,6 @@ config MLX5_FPGA_TLS
        depends on MLX5_CORE_EN
        depends on MLX5_FPGA
        select MLX5_EN_TLS
-       default n
        help
        Build TLS support for the Innova family of network cards by Mellanox
        Technologies. Innova network cards are comprised of a ConnectX chip
@@ -204,7 +197,6 @@ config MLX5_TLS
        depends on MLX5_CORE_EN
        select MLX5_ACCEL
        select MLX5_EN_TLS
-       default n
        help
        Build TLS support for the Connect-X family of network cards by Mellanox
        Technologies.
@@ -227,7 +219,6 @@ config MLX5_SW_STEERING
 config MLX5_SF
        bool "Mellanox Technologies subfunction device support using auxiliary device"
        depends on MLX5_CORE && MLX5_CORE_EN
-       default n
        help
        Build support for subfunction devices in the NIC. A Mellanox subfunction
        device can support RDMA, netdevice and vdpa device.
index a97e8d2..33de8f0 100644 (file)
@@ -136,8 +136,6 @@ static void mlx5e_ipsec_set_swp(struct sk_buff *skb,
                                struct mlx5_wqe_eth_seg *eseg, u8 mode,
                                struct xfrm_offload *xo)
 {
-       struct mlx5e_swp_spec swp_spec = {};
-
        /* Tunnel Mode:
         * SWP:      OutL3       InL3  InL4
         * Pkt: MAC  IP     ESP  IP    L4
@@ -146,23 +144,58 @@ static void mlx5e_ipsec_set_swp(struct sk_buff *skb,
         * SWP:      OutL3       InL4
         *           InL3
         * Pkt: MAC  IP     ESP  L4
+        *
+        * Tunnel(VXLAN TCP/UDP) over Transport Mode
+        * SWP:      OutL3                   InL3  InL4
+        * Pkt: MAC  IP     ESP  UDP  VXLAN  IP    L4
         */
-       swp_spec.l3_proto = skb->protocol;
-       swp_spec.is_tun = mode == XFRM_MODE_TUNNEL;
-       if (swp_spec.is_tun) {
-               if (xo->proto == IPPROTO_IPV6) {
-                       swp_spec.tun_l3_proto = htons(ETH_P_IPV6);
-                       swp_spec.tun_l4_proto = inner_ipv6_hdr(skb)->nexthdr;
-               } else {
-                       swp_spec.tun_l3_proto = htons(ETH_P_IP);
-                       swp_spec.tun_l4_proto = inner_ip_hdr(skb)->protocol;
-               }
-       } else {
-               swp_spec.tun_l3_proto = skb->protocol;
-               swp_spec.tun_l4_proto = xo->proto;
+
+       /* Shared settings */
+       eseg->swp_outer_l3_offset = skb_network_offset(skb) / 2;
+       if (skb->protocol == htons(ETH_P_IPV6))
+               eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L3_IPV6;
+
+       /* Tunnel mode */
+       if (mode == XFRM_MODE_TUNNEL) {
+               eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
+               eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
+               if (xo->proto == IPPROTO_IPV6)
+                       eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
+               if (inner_ip_hdr(skb)->protocol == IPPROTO_UDP)
+                       eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
+               return;
+       }
+
+       /* Transport mode */
+       if (mode != XFRM_MODE_TRANSPORT)
+               return;
+
+       if (!xo->inner_ipproto) {
+               eseg->swp_inner_l3_offset = skb_network_offset(skb) / 2;
+               eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
+               if (skb->protocol == htons(ETH_P_IPV6))
+                       eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
+               if (xo->proto == IPPROTO_UDP)
+                       eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
+               return;
+       }
+
+       /* Tunnel(VXLAN TCP/UDP) over Transport Mode */
+       switch (xo->inner_ipproto) {
+       case IPPROTO_UDP:
+               eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
+               fallthrough;
+       case IPPROTO_TCP:
+               eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
+               eseg->swp_inner_l4_offset = (skb->csum_start + skb->head - skb->data) / 2;
+               if (skb->protocol == htons(ETH_P_IPV6))
+                       eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
+               break;
+       default:
+               break;
        }
 
-       mlx5e_set_eseg_swp(skb, eseg, &swp_spec);
+       return;
 }
 
 void mlx5e_ipsec_set_iv_esn(struct sk_buff *skb, struct xfrm_state *x,
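In the rewritten helper above, every skb offset is divided by 2 before being stored: the SWP (software parser) offsets in the ethernet segment are expressed in 16-bit words, not bytes. A minimal sketch of that convention, with a hypothetical helper name not taken from the patch:

	/* Hypothetical helper: convert an skb byte offset into the
	 * 2-byte units the software parser fields expect.
	 */
	static inline u8 swp_offset_in_words(unsigned int byte_offset)
	{
		return byte_offset / 2;
	}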
index 3e80742..5120a59 100644 (file)
@@ -93,18 +93,38 @@ static inline bool mlx5e_ipsec_eseg_meta(struct mlx5_wqe_eth_seg *eseg)
 void mlx5e_ipsec_tx_build_eseg(struct mlx5e_priv *priv, struct sk_buff *skb,
                               struct mlx5_wqe_eth_seg *eseg);
 
-static inline bool mlx5e_ipsec_feature_check(struct sk_buff *skb, struct net_device *netdev,
-                                            netdev_features_t features)
+static inline netdev_features_t
+mlx5e_ipsec_feature_check(struct sk_buff *skb, netdev_features_t features)
 {
+       struct xfrm_offload *xo = xfrm_offload(skb);
        struct sec_path *sp = skb_sec_path(skb);
 
-       if (sp && sp->len) {
+       if (sp && sp->len && xo) {
                struct xfrm_state *x = sp->xvec[0];
 
-               if (x && x->xso.offload_handle)
-                       return true;
+               if (!x || !x->xso.offload_handle)
+                       goto out_disable;
+
+               if (xo->inner_ipproto) {
+                       /* Cannot support tunnel packet over IPsec tunnel mode
+                       /* Cannot support a tunneled packet over IPsec tunnel mode
+                        * because the hardware cannot offload three IP header checksums
+                       if (x->props.mode == XFRM_MODE_TUNNEL)
+                               goto out_disable;
+
+                       /* Only support UDP or TCP L4 checksum */
+                       if (xo->inner_ipproto != IPPROTO_UDP &&
+                           xo->inner_ipproto != IPPROTO_TCP)
+                               goto out_disable;
+               }
+
+               return features;
        }
-       return false;
+
+       /* Disable CSUM and GSO for software IPsec */
+out_disable:
+       return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
 }
 
 #else
@@ -120,8 +140,9 @@ static inline bool mlx5e_ipsec_eseg_meta(struct mlx5_wqe_eth_seg *eseg)
 }
 
 static inline bool mlx5_ipsec_is_rx_flow(struct mlx5_cqe64 *cqe) { return false; }
-static inline bool mlx5e_ipsec_feature_check(struct sk_buff *skb, struct net_device *netdev,
-                                            netdev_features_t features) { return false; }
+static inline netdev_features_t
+mlx5e_ipsec_feature_check(struct sk_buff *skb, netdev_features_t features)
+{ return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); }
 #endif /* CONFIG_MLX5_EN_IPSEC */
 
 #endif /* __MLX5E_IPSEC_RXTX_H__ */
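Note the changed contract: the helper now returns a feature mask instead of a bool, stripping checksum and GSO bits whenever the packet must fall back to software IPsec. A small worked example of the mask arithmetic, using feature bits from netdev_features.h:

	/* NETIF_F_HW_CSUM is covered by NETIF_F_CSUM_MASK and NETIF_F_TSO
	 * by NETIF_F_GSO_MASK, so only NETIF_F_SG survives the clear.
	 */
	netdev_features_t f = NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_SG;

	f &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);	/* f == NETIF_F_SG */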
index 7d732fa..414a73d 100644 (file)
@@ -4330,6 +4330,11 @@ static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv,
                /* Support Geneve offload for default UDP port */
                if (port == GENEVE_UDP_PORT && mlx5_geneve_tx_allowed(priv->mdev))
                        return features;
+#endif
+               break;
+#ifdef CONFIG_MLX5_EN_IPSEC
+       case IPPROTO_ESP:
+               return mlx5e_ipsec_feature_check(skb, features);
 #endif
        }
 
@@ -4347,9 +4352,6 @@ netdev_features_t mlx5e_features_check(struct sk_buff *skb,
        features = vlan_features_check(skb, features);
        features = vxlan_features_check(skb, features);
 
-       if (mlx5e_ipsec_feature_check(skb, netdev, features))
-               return features;
-
        /* Validate if the tunneled packet is being offloaded by HW */
        if (skb->encapsulation &&
            (features & NETIF_F_CSUM_MASK || features & NETIF_F_GSO_MASK))
index c7efd17..6e074cc 100644 (file)
@@ -712,7 +712,7 @@ mlx5_eq_create_generic(struct mlx5_core_dev *dev,
        struct mlx5_eq *eq = kvzalloc(sizeof(*eq), GFP_KERNEL);
        int err;
 
-       if (!param->affinity)
+       if (!cpumask_available(param->affinity))
                return ERR_PTR(-EINVAL);
 
        if (!eq)
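A hedged note on this one-liner: with CONFIG_CPUMASK_OFFSTACK=n, cpumask_var_t is a fixed-size array rather than a pointer, so a bare NULL test degenerates into an address-of-array comparison that compilers warn is always true; cpumask_available() evaluates correctly for both configurations.

	/* Sketch based on the generic cpumask API, not mlx5 internals:
	 *
	 * CONFIG_CPUMASK_OFFSTACK=y: cpumask_var_t is struct cpumask *,
	 *	cpumask_available(m) tests m != NULL
	 * CONFIG_CPUMASK_OFFSTACK=n: cpumask_var_t is struct cpumask[1],
	 *	cpumask_available(m) is constantly true
	 */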
index 2cd7aea..b861745 100644 (file)
@@ -2969,8 +2969,11 @@ int mlx5_init_fs(struct mlx5_core_dev *dev)
                return err;
 
        steering = kzalloc(sizeof(*steering), GFP_KERNEL);
-       if (!steering)
+       if (!steering) {
+               err = -ENOMEM;
                goto err;
+       }
+
        steering->dev = dev;
        dev->priv.steering = steering;
 
index 27de8da..b25f764 100644 (file)
@@ -479,7 +479,7 @@ static int irq_pools_init(struct mlx5_core_dev *dev, int sf_vec, int pf_vec)
        if (!mlx5_sf_max_functions(dev))
                return 0;
        if (sf_vec < MLX5_IRQ_VEC_COMP_BASE_SF) {
-               mlx5_core_err(dev, "Not enught IRQs for SFs. SF may run at lower performance\n");
+               mlx5_core_err(dev, "Not enough IRQs for SFs. SF may run at lower performance\n");
                return 0;
        }
 
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/Kconfig b/drivers/net/ethernet/mellanox/mlxbf_gige/Kconfig
new file mode 100644 (file)
index 0000000..4cdebaf
--- /dev/null
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause
+#
+# Mellanox GigE driver configuration
+#
+
+config MLXBF_GIGE
+       tristate "Mellanox Technologies BlueField Gigabit Ethernet support"
+       depends on (ARM64 && ACPI) || COMPILE_TEST
+       select PHYLIB
+       help
+         The second generation BlueField SoC from Mellanox Technologies
+         supports an out-of-band Gigabit Ethernet management port to the
+         Arm subsystem.
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/Makefile b/drivers/net/ethernet/mellanox/mlxbf_gige/Makefile
new file mode 100644 (file)
index 0000000..e57c137
--- /dev/null
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause
+
+obj-$(CONFIG_MLXBF_GIGE) += mlxbf_gige.o
+
+mlxbf_gige-y := mlxbf_gige_ethtool.o \
+               mlxbf_gige_gpio.o \
+               mlxbf_gige_intr.o \
+               mlxbf_gige_main.o \
+               mlxbf_gige_mdio.o \
+               mlxbf_gige_rx.o   \
+               mlxbf_gige_tx.o
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h
new file mode 100644 (file)
index 0000000..e3509e6
--- /dev/null
@@ -0,0 +1,190 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause */
+
+/* Header file for Gigabit Ethernet driver for Mellanox BlueField SoC
+ * - this file contains software data structures and any chip-specific
+ *   data structures (e.g. TX WQE format) that are memory resident.
+ *
+ * Copyright (C) 2020-2021 NVIDIA CORPORATION & AFFILIATES
+ */
+
+#ifndef __MLXBF_GIGE_H__
+#define __MLXBF_GIGE_H__
+
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/irqreturn.h>
+#include <linux/netdevice.h>
+#include <linux/irq.h>
+
+/* The silicon design supports a maximum RX ring size of
+ * 32K entries. Based on current testing this maximum size
+ * is not required to be supported.  Instead the RX ring
+ * will be capped at a realistic value of 1024 entries.
+ */
+#define MLXBF_GIGE_MIN_RXQ_SZ     32
+#define MLXBF_GIGE_MAX_RXQ_SZ     1024
+#define MLXBF_GIGE_DEFAULT_RXQ_SZ 128
+
+#define MLXBF_GIGE_MIN_TXQ_SZ     4
+#define MLXBF_GIGE_MAX_TXQ_SZ     256
+#define MLXBF_GIGE_DEFAULT_TXQ_SZ 128
+
+#define MLXBF_GIGE_DEFAULT_BUF_SZ 2048
+
+#define MLXBF_GIGE_DMA_PAGE_SZ    4096
+#define MLXBF_GIGE_DMA_PAGE_SHIFT 12
+
+/* There are four individual MAC RX filters. Currently
+ * two of them are being used: one for the broadcast MAC
+ * (index 0) and one for local MAC (index 1)
+ */
+#define MLXBF_GIGE_BCAST_MAC_FILTER_IDX 0
+#define MLXBF_GIGE_LOCAL_MAC_FILTER_IDX 1
+
+/* Define for broadcast MAC literal */
+#define BCAST_MAC_ADDR 0xFFFFFFFFFFFF
+
+/* There are three individual interrupts:
+ *   1) Errors, "OOB" interrupt line
+ *   2) Receive Packet, "OOB_LLU" interrupt line
+ *   3) LLU and PLU Events, "OOB_PLU" interrupt line
+ */
+#define MLXBF_GIGE_ERROR_INTR_IDX       0
+#define MLXBF_GIGE_RECEIVE_PKT_INTR_IDX 1
+#define MLXBF_GIGE_LLU_PLU_INTR_IDX     2
+#define MLXBF_GIGE_PHY_INT_N            3
+
+#define MLXBF_GIGE_MDIO_DEFAULT_PHY_ADDR 0x3
+
+#define MLXBF_GIGE_DEFAULT_PHY_INT_GPIO 12
+
+struct mlxbf_gige_stats {
+       u64 hw_access_errors;
+       u64 tx_invalid_checksums;
+       u64 tx_small_frames;
+       u64 tx_index_errors;
+       u64 sw_config_errors;
+       u64 sw_access_errors;
+       u64 rx_truncate_errors;
+       u64 rx_mac_errors;
+       u64 rx_din_dropped_pkts;
+       u64 tx_fifo_full;
+       u64 rx_filter_passed_pkts;
+       u64 rx_filter_discard_pkts;
+};
+
+struct mlxbf_gige {
+       void __iomem *base;
+       void __iomem *llu_base;
+       void __iomem *plu_base;
+       struct device *dev;
+       struct net_device *netdev;
+       struct platform_device *pdev;
+       void __iomem *mdio_io;
+       struct mii_bus *mdiobus;
+       void __iomem *gpio_io;
+       struct irq_domain *irqdomain;
+       u32 phy_int_gpio_mask;
+       spinlock_t lock;      /* for packet processing indices */
+       spinlock_t gpio_lock; /* for GPIO bus access */
+       u16 rx_q_entries;
+       u16 tx_q_entries;
+       u64 *tx_wqe_base;
+       dma_addr_t tx_wqe_base_dma;
+       u64 *tx_wqe_next;
+       u64 *tx_cc;
+       dma_addr_t tx_cc_dma;
+       dma_addr_t *rx_wqe_base;
+       dma_addr_t rx_wqe_base_dma;
+       u64 *rx_cqe_base;
+       dma_addr_t rx_cqe_base_dma;
+       u16 tx_pi;
+       u16 prev_tx_ci;
+       u64 error_intr_count;
+       u64 rx_intr_count;
+       u64 llu_plu_intr_count;
+       struct sk_buff *rx_skb[MLXBF_GIGE_MAX_RXQ_SZ];
+       struct sk_buff *tx_skb[MLXBF_GIGE_MAX_TXQ_SZ];
+       int error_irq;
+       int rx_irq;
+       int llu_plu_irq;
+       int phy_irq;
+       int hw_phy_irq;
+       bool promisc_enabled;
+       u8 valid_polarity;
+       struct napi_struct napi;
+       struct mlxbf_gige_stats stats;
+};
+
+/* Rx Work Queue Element definitions */
+#define MLXBF_GIGE_RX_WQE_SZ                   8
+
+/* Rx Completion Queue Element definitions */
+#define MLXBF_GIGE_RX_CQE_SZ                   8
+#define MLXBF_GIGE_RX_CQE_PKT_LEN_MASK         GENMASK(10, 0)
+#define MLXBF_GIGE_RX_CQE_VALID_MASK           GENMASK(11, 11)
+#define MLXBF_GIGE_RX_CQE_PKT_STATUS_MASK      GENMASK(15, 12)
+#define MLXBF_GIGE_RX_CQE_PKT_STATUS_MAC_ERR   GENMASK(12, 12)
+#define MLXBF_GIGE_RX_CQE_PKT_STATUS_TRUNCATED GENMASK(13, 13)
+#define MLXBF_GIGE_RX_CQE_CHKSUM_MASK          GENMASK(31, 16)
+
+/* Tx Work Queue Element definitions */
+#define MLXBF_GIGE_TX_WQE_SZ_QWORDS            2
+#define MLXBF_GIGE_TX_WQE_SZ                   16
+#define MLXBF_GIGE_TX_WQE_PKT_LEN_MASK         GENMASK(10, 0)
+#define MLXBF_GIGE_TX_WQE_UPDATE_MASK          GENMASK(31, 31)
+#define MLXBF_GIGE_TX_WQE_CHKSUM_LEN_MASK      GENMASK(42, 32)
+#define MLXBF_GIGE_TX_WQE_CHKSUM_START_MASK    GENMASK(55, 48)
+#define MLXBF_GIGE_TX_WQE_CHKSUM_OFFSET_MASK   GENMASK(63, 56)
+
+/* Macro to return packet length of specified TX WQE */
+#define MLXBF_GIGE_TX_WQE_PKT_LEN(tx_wqe_addr) \
+       (*((tx_wqe_addr) + 1) & MLXBF_GIGE_TX_WQE_PKT_LEN_MASK)
+
+/* Tx Completion Count */
+#define MLXBF_GIGE_TX_CC_SZ                    8
+
+/* List of resources in ACPI table */
+enum mlxbf_gige_res {
+       MLXBF_GIGE_RES_MAC,
+       MLXBF_GIGE_RES_MDIO9,
+       MLXBF_GIGE_RES_GPIO0,
+       MLXBF_GIGE_RES_LLU,
+       MLXBF_GIGE_RES_PLU
+};
+
+/* Version of register data returned by mlxbf_gige_get_regs() */
+#define MLXBF_GIGE_REGS_VERSION 1
+
+int mlxbf_gige_mdio_probe(struct platform_device *pdev,
+                         struct mlxbf_gige *priv);
+void mlxbf_gige_mdio_remove(struct mlxbf_gige *priv);
+irqreturn_t mlxbf_gige_mdio_handle_phy_interrupt(int irq, void *dev_id);
+void mlxbf_gige_mdio_enable_phy_int(struct mlxbf_gige *priv);
+
+void mlxbf_gige_set_mac_rx_filter(struct mlxbf_gige *priv,
+                                 unsigned int index, u64 dmac);
+void mlxbf_gige_get_mac_rx_filter(struct mlxbf_gige *priv,
+                                 unsigned int index, u64 *dmac);
+void mlxbf_gige_enable_promisc(struct mlxbf_gige *priv);
+void mlxbf_gige_disable_promisc(struct mlxbf_gige *priv);
+int mlxbf_gige_rx_init(struct mlxbf_gige *priv);
+void mlxbf_gige_rx_deinit(struct mlxbf_gige *priv);
+int mlxbf_gige_tx_init(struct mlxbf_gige *priv);
+void mlxbf_gige_tx_deinit(struct mlxbf_gige *priv);
+bool mlxbf_gige_handle_tx_complete(struct mlxbf_gige *priv);
+netdev_tx_t mlxbf_gige_start_xmit(struct sk_buff *skb,
+                                 struct net_device *netdev);
+struct sk_buff *mlxbf_gige_alloc_skb(struct mlxbf_gige *priv,
+                                    unsigned int map_len,
+                                    dma_addr_t *buf_dma,
+                                    enum dma_data_direction dir);
+int mlxbf_gige_request_irqs(struct mlxbf_gige *priv);
+void mlxbf_gige_free_irqs(struct mlxbf_gige *priv);
+int mlxbf_gige_poll(struct napi_struct *napi, int budget);
+extern const struct ethtool_ops mlxbf_gige_ethtool_ops;
+void mlxbf_gige_update_tx_wqe_next(struct mlxbf_gige *priv);
+
+int mlxbf_gige_gpio_init(struct platform_device *pdev, struct mlxbf_gige *priv);
+void mlxbf_gige_gpio_free(struct mlxbf_gige *priv);
+
+#endif /* !defined(__MLXBF_GIGE_H__) */
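The GENMASK-based WQE/CQE layouts above pair naturally with the kernel's bitfield helpers. A short sketch of decoding one RX completion word; the function names are illustrative, not taken from the driver:

	#include <linux/bitfield.h>

	/* Extract packet length and validity from a 64-bit RX CQE word */
	static inline u32 sketch_rx_cqe_pkt_len(u64 cqe)
	{
		return FIELD_GET(MLXBF_GIGE_RX_CQE_PKT_LEN_MASK, cqe);
	}

	static inline bool sketch_rx_cqe_valid(u64 cqe)
	{
		return FIELD_GET(MLXBF_GIGE_RX_CQE_VALID_MASK, cqe);
	}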
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_ethtool.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_ethtool.c
new file mode 100644 (file)
index 0000000..92b798f
--- /dev/null
@@ -0,0 +1,137 @@
+// SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause
+
+/* Ethtool support for Mellanox Gigabit Ethernet driver
+ *
+ * Copyright (C) 2020-2021 NVIDIA CORPORATION & AFFILIATES
+ */
+
+#include <linux/phy.h>
+
+#include "mlxbf_gige.h"
+#include "mlxbf_gige_regs.h"
+
+/* Start of struct ethtool_ops functions */
+static int mlxbf_gige_get_regs_len(struct net_device *netdev)
+{
+       return MLXBF_GIGE_MMIO_REG_SZ;
+}
+
+static void mlxbf_gige_get_regs(struct net_device *netdev,
+                               struct ethtool_regs *regs, void *p)
+{
+       struct mlxbf_gige *priv = netdev_priv(netdev);
+
+       regs->version = MLXBF_GIGE_REGS_VERSION;
+
+       /* Read entire MMIO register space and store results
+        * into the provided buffer. Each 64-bit word is converted
+        * to big-endian to make the output more readable.
+        *
+        * NOTE: by design, a read to an offset without an existing
+        *       register will be acknowledged and return zero.
+        */
+       memcpy_fromio(p, priv->base, MLXBF_GIGE_MMIO_REG_SZ);
+}
+
+static void mlxbf_gige_get_ringparam(struct net_device *netdev,
+                                    struct ethtool_ringparam *ering)
+{
+       struct mlxbf_gige *priv = netdev_priv(netdev);
+
+       ering->rx_max_pending = MLXBF_GIGE_MAX_RXQ_SZ;
+       ering->tx_max_pending = MLXBF_GIGE_MAX_TXQ_SZ;
+       ering->rx_pending = priv->rx_q_entries;
+       ering->tx_pending = priv->tx_q_entries;
+}
+
+static const struct {
+       const char string[ETH_GSTRING_LEN];
+} mlxbf_gige_ethtool_stats_keys[] = {
+       { "hw_access_errors" },
+       { "tx_invalid_checksums" },
+       { "tx_small_frames" },
+       { "tx_index_errors" },
+       { "sw_config_errors" },
+       { "sw_access_errors" },
+       { "rx_truncate_errors" },
+       { "rx_mac_errors" },
+       { "rx_din_dropped_pkts" },
+       { "tx_fifo_full" },
+       { "rx_filter_passed_pkts" },
+       { "rx_filter_discard_pkts" },
+};
+
+static int mlxbf_gige_get_sset_count(struct net_device *netdev, int stringset)
+{
+       if (stringset != ETH_SS_STATS)
+               return -EOPNOTSUPP;
+       return ARRAY_SIZE(mlxbf_gige_ethtool_stats_keys);
+}
+
+static void mlxbf_gige_get_strings(struct net_device *netdev, u32 stringset,
+                                  u8 *buf)
+{
+       if (stringset != ETH_SS_STATS)
+               return;
+       memcpy(buf, &mlxbf_gige_ethtool_stats_keys,
+              sizeof(mlxbf_gige_ethtool_stats_keys));
+}
+
+static void mlxbf_gige_get_ethtool_stats(struct net_device *netdev,
+                                        struct ethtool_stats *estats,
+                                        u64 *data)
+{
+       struct mlxbf_gige *priv = netdev_priv(netdev);
+
+       /* Fill data array with interface statistics
+        *
+        * NOTE: the data writes must be in
+        *       sync with the strings shown in
+        *       the mlxbf_gige_ethtool_stats_keys[] array
+        *
+        * NOTE2: certain statistics below are zeroed upon
+        *        port disable, so the calculation below
+        *        must include the "cached" value of the stat
+        *        plus the value read directly from hardware.
+        *        Cached statistics are currently:
+        *          rx_din_dropped_pkts
+        *          rx_filter_passed_pkts
+        *          rx_filter_discard_pkts
+        */
+       *data++ = priv->stats.hw_access_errors;
+       *data++ = priv->stats.tx_invalid_checksums;
+       *data++ = priv->stats.tx_small_frames;
+       *data++ = priv->stats.tx_index_errors;
+       *data++ = priv->stats.sw_config_errors;
+       *data++ = priv->stats.sw_access_errors;
+       *data++ = priv->stats.rx_truncate_errors;
+       *data++ = priv->stats.rx_mac_errors;
+       *data++ = (priv->stats.rx_din_dropped_pkts +
+                  readq(priv->base + MLXBF_GIGE_RX_DIN_DROP_COUNTER));
+       *data++ = priv->stats.tx_fifo_full;
+       *data++ = (priv->stats.rx_filter_passed_pkts +
+                  readq(priv->base + MLXBF_GIGE_RX_PASS_COUNTER_ALL));
+       *data++ = (priv->stats.rx_filter_discard_pkts +
+                  readq(priv->base + MLXBF_GIGE_RX_DISC_COUNTER_ALL));
+}
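+
+/* One way (an assumption, not in the driver) to enforce the ordering
+ * contract between mlxbf_gige_ethtool_stats_keys[] and the writes
+ * above at build time:
+ *
+ *	BUILD_BUG_ON(ARRAY_SIZE(mlxbf_gige_ethtool_stats_keys) != 12);
+ */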
+
+static void mlxbf_gige_get_pauseparam(struct net_device *netdev,
+                                     struct ethtool_pauseparam *pause)
+{
+       pause->autoneg = AUTONEG_DISABLE;
+       pause->rx_pause = 1;
+       pause->tx_pause = 1;
+}
+
+const struct ethtool_ops mlxbf_gige_ethtool_ops = {
+       .get_link               = ethtool_op_get_link,
+       .get_ringparam          = mlxbf_gige_get_ringparam,
+       .get_regs_len           = mlxbf_gige_get_regs_len,
+       .get_regs               = mlxbf_gige_get_regs,
+       .get_strings            = mlxbf_gige_get_strings,
+       .get_sset_count         = mlxbf_gige_get_sset_count,
+       .get_ethtool_stats      = mlxbf_gige_get_ethtool_stats,
+       .nway_reset             = phy_ethtool_nway_reset,
+       .get_pauseparam         = mlxbf_gige_get_pauseparam,
+       .get_link_ksettings     = phy_ethtool_get_link_ksettings,
+};
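Usage note: these hooks back the standard ethtool plumbing, e.g. "ethtool -S <iface>" walks get_sset_count/get_strings/get_ethtool_stats, and "ethtool -d <iface>" returns the raw register window produced by get_regs.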
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_gpio.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_gpio.c
new file mode 100644 (file)
index 0000000..a8d966d
--- /dev/null
@@ -0,0 +1,212 @@
+// SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause
+
+/* Initialize and handle the GPIO interrupt triggered by the PHY's
+ * INT_N signal. This GPIO interrupt drives the PHY state machine to
+ * bring the link up/down.
+ *
+ * Copyright (C) 2021 NVIDIA CORPORATION & AFFILIATES
+ */
+
+#include <linux/acpi.h>
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/gpio/driver.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/irqreturn.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+
+#include "mlxbf_gige.h"
+#include "mlxbf_gige_regs.h"
+
+#define MLXBF_GIGE_GPIO_CAUSE_FALL_EN          0x48
+#define MLXBF_GIGE_GPIO_CAUSE_OR_CAUSE_EVTEN0  0x80
+#define MLXBF_GIGE_GPIO_CAUSE_OR_EVTEN0                0x94
+#define MLXBF_GIGE_GPIO_CAUSE_OR_CLRCAUSE      0x98
+
+static void mlxbf_gige_gpio_enable(struct mlxbf_gige *priv)
+{
+       unsigned long flags;
+       u32 val;
+
+       spin_lock_irqsave(&priv->gpio_lock, flags);
+       val = readl(priv->gpio_io + MLXBF_GIGE_GPIO_CAUSE_OR_CLRCAUSE);
+       val |= priv->phy_int_gpio_mask;
+       writel(val, priv->gpio_io + MLXBF_GIGE_GPIO_CAUSE_OR_CLRCAUSE);
+
+       /* The INT_N interrupt level is active low, so enable the
+        * cause-fall bit to detect when the GPIO state goes low.
+        */
+       val = readl(priv->gpio_io + MLXBF_GIGE_GPIO_CAUSE_FALL_EN);
+       val |= priv->phy_int_gpio_mask;
+       writel(val, priv->gpio_io + MLXBF_GIGE_GPIO_CAUSE_FALL_EN);
+
+       /* Enable PHY interrupt by setting the priority level */
+       val = readl(priv->gpio_io + MLXBF_GIGE_GPIO_CAUSE_OR_EVTEN0);
+       val |= priv->phy_int_gpio_mask;
+       writel(val, priv->gpio_io + MLXBF_GIGE_GPIO_CAUSE_OR_EVTEN0);
+       spin_unlock_irqrestore(&priv->gpio_lock, flags);
+}
+
+static void mlxbf_gige_gpio_disable(struct mlxbf_gige *priv)
+{
+       unsigned long flags;
+       u32 val;
+
+       spin_lock_irqsave(&priv->gpio_lock, flags);
+       val = readl(priv->gpio_io + MLXBF_GIGE_GPIO_CAUSE_OR_EVTEN0);
+       val &= ~priv->phy_int_gpio_mask;
+       writel(val, priv->gpio_io + MLXBF_GIGE_GPIO_CAUSE_OR_EVTEN0);
+       spin_unlock_irqrestore(&priv->gpio_lock, flags);
+}
+
+static irqreturn_t mlxbf_gige_gpio_handler(int irq, void *ptr)
+{
+       struct mlxbf_gige *priv;
+       u32 val;
+
+       priv = ptr;
+
+       /* Check if this interrupt is from the PHY device;
+        * return if it is not.
+        */
+       val = readl(priv->gpio_io + MLXBF_GIGE_GPIO_CAUSE_OR_CAUSE_EVTEN0);
+       if (!(val & priv->phy_int_gpio_mask))
+               return IRQ_NONE;
+
+       /* Clear the interrupt when done, otherwise no further
+        * interrupts will be triggered.
+        */
+       val = readl(priv->gpio_io + MLXBF_GIGE_GPIO_CAUSE_OR_CLRCAUSE);
+       val |= priv->phy_int_gpio_mask;
+       writel(val, priv->gpio_io + MLXBF_GIGE_GPIO_CAUSE_OR_CLRCAUSE);
+
+       generic_handle_irq(priv->phy_irq);
+
+       return IRQ_HANDLED;
+}
+
+static void mlxbf_gige_gpio_mask(struct irq_data *irqd)
+{
+       struct mlxbf_gige *priv = irq_data_get_irq_chip_data(irqd);
+
+       mlxbf_gige_gpio_disable(priv);
+}
+
+static void mlxbf_gige_gpio_unmask(struct irq_data *irqd)
+{
+       struct mlxbf_gige *priv = irq_data_get_irq_chip_data(irqd);
+
+       mlxbf_gige_gpio_enable(priv);
+}
+
+static struct irq_chip mlxbf_gige_gpio_chip = {
+       .name                   = "mlxbf_gige_phy",
+       .irq_mask               = mlxbf_gige_gpio_mask,
+       .irq_unmask             = mlxbf_gige_gpio_unmask,
+};
+
+static int mlxbf_gige_gpio_domain_map(struct irq_domain *d,
+                                     unsigned int irq,
+                                     irq_hw_number_t hwirq)
+{
+       irq_set_chip_data(irq, d->host_data);
+       irq_set_chip_and_handler(irq, &mlxbf_gige_gpio_chip, handle_simple_irq);
+       irq_set_noprobe(irq);
+
+       return 0;
+}
+
+static const struct irq_domain_ops mlxbf_gige_gpio_domain_ops = {
+       .map    = mlxbf_gige_gpio_domain_map,
+       .xlate  = irq_domain_xlate_twocell,
+};
+
+#ifdef CONFIG_ACPI
+static int mlxbf_gige_gpio_resources(struct acpi_resource *ares,
+                                    void *data)
+{
+       struct acpi_resource_gpio *gpio;
+       u32 *phy_int_gpio = data;
+
+       if (ares->type == ACPI_RESOURCE_TYPE_GPIO) {
+               gpio = &ares->data.gpio;
+               *phy_int_gpio = gpio->pin_table[0];
+       }
+
+       return 1;
+}
+#endif
+
+void mlxbf_gige_gpio_free(struct mlxbf_gige *priv)
+{
+       irq_dispose_mapping(priv->phy_irq);
+       irq_domain_remove(priv->irqdomain);
+}
+
+int mlxbf_gige_gpio_init(struct platform_device *pdev,
+                        struct mlxbf_gige *priv)
+{
+       struct device *dev = &pdev->dev;
+       struct resource *res;
+       u32 phy_int_gpio = 0;
+       int ret;
+
+       LIST_HEAD(resources);
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, MLXBF_GIGE_RES_GPIO0);
+       if (!res)
+               return -ENODEV;
+
+       priv->gpio_io = devm_ioremap(dev, res->start, resource_size(res));
+       if (!priv->gpio_io)
+               return -ENOMEM;
+
+#ifdef CONFIG_ACPI
+       ret = acpi_dev_get_resources(ACPI_COMPANION(dev),
+                                    &resources, mlxbf_gige_gpio_resources,
+                                    &phy_int_gpio);
+       acpi_dev_free_resource_list(&resources);
+       if (ret < 0 || !phy_int_gpio) {
+               dev_err(dev, "Error retrieving the GPIO PHY pin\n");
+               return -EINVAL;
+       }
+#endif
+
+       priv->phy_int_gpio_mask = BIT(phy_int_gpio);
+
+       mlxbf_gige_gpio_disable(priv);
+
+       priv->hw_phy_irq = platform_get_irq(pdev, MLXBF_GIGE_PHY_INT_N);
+
+       priv->irqdomain = irq_domain_add_simple(NULL, 1, 0,
+                                               &mlxbf_gige_gpio_domain_ops,
+                                               priv);
+       if (!priv->irqdomain) {
+               dev_err(dev, "Failed to add IRQ domain\n");
+               return -ENOMEM;
+       }
+
+       priv->phy_irq = irq_create_mapping(priv->irqdomain, 0);
+       if (!priv->phy_irq) {
+               irq_domain_remove(priv->irqdomain);
+               priv->irqdomain = NULL;
+               dev_err(dev, "Error mapping PHY IRQ\n");
+               return -EINVAL;
+       }
+
+       ret = devm_request_irq(dev, priv->hw_phy_irq, mlxbf_gige_gpio_handler,
+                              IRQF_ONESHOT | IRQF_SHARED, "mlxbf_gige_phy", priv);
+       if (ret) {
+               dev_err(dev, "Failed to request PHY IRQ\n");
+               mlxbf_gige_gpio_free(priv);
+               return ret;
+       }
+
+       return ret;
+}
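The net effect of this file is a tiny IRQ domain that turns the shared GPIO hardware interrupt into a dedicated virtual IRQ for phylib. A simplified view of the dispatch chain (the handler names are the ones defined above; the phydev->irq assignment happens later, in probe):

	/*
	 * hw GPIO irq
	 *   -> mlxbf_gige_gpio_handler()	checks and acks the cause bit
	 *     -> generic_handle_irq(priv->phy_irq)
	 *       -> phylib interrupt handler	(phydev->irq = priv->phy_irq)
	 */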
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_intr.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_intr.c
new file mode 100644 (file)
index 0000000..c38795b
--- /dev/null
@@ -0,0 +1,142 @@
+// SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause
+
+/* Interrupt related logic for Mellanox Gigabit Ethernet driver
+ *
+ * Copyright (C) 2020-2021 NVIDIA CORPORATION & AFFILIATES
+ */
+
+#include <linux/interrupt.h>
+
+#include "mlxbf_gige.h"
+#include "mlxbf_gige_regs.h"
+
+static irqreturn_t mlxbf_gige_error_intr(int irq, void *dev_id)
+{
+       struct mlxbf_gige *priv;
+       u64 int_status;
+
+       priv = dev_id;
+
+       priv->error_intr_count++;
+
+       int_status = readq(priv->base + MLXBF_GIGE_INT_STATUS);
+
+       if (int_status & MLXBF_GIGE_INT_STATUS_HW_ACCESS_ERROR)
+               priv->stats.hw_access_errors++;
+
+       if (int_status & MLXBF_GIGE_INT_STATUS_TX_CHECKSUM_INPUTS) {
+               priv->stats.tx_invalid_checksums++;
+               /* This error condition is latched into MLXBF_GIGE_INT_STATUS
+                * when the GigE silicon operates on the offending
+                * TX WQE. The write to MLXBF_GIGE_INT_STATUS at the bottom
+                * of this routine clears this error condition.
+                */
+       }
+
+       if (int_status & MLXBF_GIGE_INT_STATUS_TX_SMALL_FRAME_SIZE) {
+               priv->stats.tx_small_frames++;
+               /* This condition happens when the networking stack invokes
+                * this driver's "start_xmit()" method with a packet whose
+                * size < 60 bytes.  The GigE silicon will automatically pad
+                * this small frame up to a minimum-sized frame before it is
+                * sent. The "tx_small_frame" condition is latched into the
+                * MLXBF_GIGE_INT_STATUS register when the GigE silicon
+                * operates on the offending TX WQE. The write to
+                * MLXBF_GIGE_INT_STATUS at the bottom of this routine
+                * clears this condition.
+                */
+       }
+
+       if (int_status & MLXBF_GIGE_INT_STATUS_TX_PI_CI_EXCEED_WQ_SIZE)
+               priv->stats.tx_index_errors++;
+
+       if (int_status & MLXBF_GIGE_INT_STATUS_SW_CONFIG_ERROR)
+               priv->stats.sw_config_errors++;
+
+       if (int_status & MLXBF_GIGE_INT_STATUS_SW_ACCESS_ERROR)
+               priv->stats.sw_access_errors++;
+
+       /* Clear all error interrupts by writing '1' back to
+        * all the asserted bits in INT_STATUS.  Do not write
+        * '1' back to 'receive packet' bit, since that is
+        * managed separately.
+        */
+
+       int_status &= ~MLXBF_GIGE_INT_STATUS_RX_RECEIVE_PACKET;
+
+       writeq(int_status, priv->base + MLXBF_GIGE_INT_STATUS);
+
+       return IRQ_HANDLED;
+}
+
+static irqreturn_t mlxbf_gige_rx_intr(int irq, void *dev_id)
+{
+       struct mlxbf_gige *priv;
+
+       priv = dev_id;
+
+       priv->rx_intr_count++;
+
+       /* NOTE: GigE silicon automatically disables "packet rx" interrupt by
+        *       setting MLXBF_GIGE_INT_MASK bit0 upon triggering the interrupt
+        *       to the ARM cores.  Software needs to re-enable "packet rx"
+        *       interrupts by clearing MLXBF_GIGE_INT_MASK bit0.
+        */
+
+       napi_schedule(&priv->napi);
+
+       return IRQ_HANDLED;
+}
+
+static irqreturn_t mlxbf_gige_llu_plu_intr(int irq, void *dev_id)
+{
+       struct mlxbf_gige *priv;
+
+       priv = dev_id;
+       priv->llu_plu_intr_count++;
+
+       return IRQ_HANDLED;
+}
+
+int mlxbf_gige_request_irqs(struct mlxbf_gige *priv)
+{
+       int err;
+
+       err = request_irq(priv->error_irq, mlxbf_gige_error_intr, 0,
+                         "mlxbf_gige_error", priv);
+       if (err) {
+               dev_err(priv->dev, "Request error_irq failure\n");
+               return err;
+       }
+
+       err = request_irq(priv->rx_irq, mlxbf_gige_rx_intr, 0,
+                         "mlxbf_gige_rx", priv);
+       if (err) {
+               dev_err(priv->dev, "Request rx_irq failure\n");
+               goto free_error_irq;
+       }
+
+       err = request_irq(priv->llu_plu_irq, mlxbf_gige_llu_plu_intr, 0,
+                         "mlxbf_gige_llu_plu", priv);
+       if (err) {
+               dev_err(priv->dev, "Request llu_plu_irq failure\n");
+               goto free_rx_irq;
+       }
+
+       return 0;
+
+free_rx_irq:
+       free_irq(priv->rx_irq, priv);
+
+free_error_irq:
+       free_irq(priv->error_irq, priv);
+
+       return err;
+}
+
+void mlxbf_gige_free_irqs(struct mlxbf_gige *priv)
+{
+       free_irq(priv->error_irq, priv);
+       free_irq(priv->rx_irq, priv);
+       free_irq(priv->llu_plu_irq, priv);
+}
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
new file mode 100644 (file)
index 0000000..a0a059e
--- /dev/null
@@ -0,0 +1,452 @@
+// SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause
+
+/* Gigabit Ethernet driver for Mellanox BlueField SoC
+ *
+ * Copyright (C) 2020-2021 NVIDIA CORPORATION & AFFILIATES
+ */
+
+#include <linux/acpi.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/etherdevice.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/skbuff.h>
+
+#include "mlxbf_gige.h"
+#include "mlxbf_gige_regs.h"
+
+#define DRV_NAME    "mlxbf_gige"
+
+/* Allocate an SKB whose payload pointer aligns with the BlueField
+ * hardware DMA limitation, i.e. a DMA operation can't cross
+ * a 4KB boundary.  A maximum packet size of 2KB is assumed in the
+ * alignment formula.  The alignment logic overallocates an SKB,
+ * and then adjusts the headroom so that the SKB data pointer is
+ * naturally aligned to a 2KB boundary.
+ */
+struct sk_buff *mlxbf_gige_alloc_skb(struct mlxbf_gige *priv,
+                                    unsigned int map_len,
+                                    dma_addr_t *buf_dma,
+                                    enum dma_data_direction dir)
+{
+       struct sk_buff *skb;
+       u64 addr, offset;
+
+       /* Overallocate the SKB so that any headroom adjustment (to
+        * provide 2KB natural alignment) does not exceed payload area
+        */
+       skb = netdev_alloc_skb(priv->netdev, MLXBF_GIGE_DEFAULT_BUF_SZ * 2);
+       if (!skb)
+               return NULL;
+
+       /* Adjust the headroom so that skb->data is naturally aligned to
+        * a 2KB boundary, which is the maximum packet size supported.
+        */
+       addr = (long)skb->data;
+       offset = (addr + MLXBF_GIGE_DEFAULT_BUF_SZ - 1) &
+               ~(MLXBF_GIGE_DEFAULT_BUF_SZ - 1);
+       offset -= addr;
+       if (offset)
+               skb_reserve(skb, offset);
+
+       /* Return streaming DMA mapping to caller */
+       *buf_dma = dma_map_single(priv->dev, skb->data, map_len, dir);
+       if (dma_mapping_error(priv->dev, *buf_dma)) {
+               dev_kfree_skb(skb);
+               *buf_dma = (dma_addr_t)0;
+               return NULL;
+       }
+
+       return skb;
+}
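+
+/* Worked example of the alignment math above: if skb->data lands at
+ * 0x12345ef0, rounding up to the 2KB (0x800) boundary yields
+ * 0x12346000, so offset = 0x110 and skb_reserve(skb, 0x110) leaves
+ * skb->data 2KB-aligned with a full 2KB payload area still available
+ * inside the doubled allocation.
+ */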
+
+static void mlxbf_gige_initial_mac(struct mlxbf_gige *priv)
+{
+       u8 mac[ETH_ALEN];
+       u64 local_mac;
+
+       memset(mac, 0, ETH_ALEN);
+       mlxbf_gige_get_mac_rx_filter(priv, MLXBF_GIGE_LOCAL_MAC_FILTER_IDX,
+                                    &local_mac);
+       u64_to_ether_addr(local_mac, mac);
+
+       if (is_valid_ether_addr(mac)) {
+               ether_addr_copy(priv->netdev->dev_addr, mac);
+       } else {
+               /* Provide a random MAC if for some reason the device has
+                * not been configured with a valid MAC address already.
+                */
+               eth_hw_addr_random(priv->netdev);
+       }
+
+       local_mac = ether_addr_to_u64(priv->netdev->dev_addr);
+       mlxbf_gige_set_mac_rx_filter(priv, MLXBF_GIGE_LOCAL_MAC_FILTER_IDX,
+                                    local_mac);
+}
+
+static void mlxbf_gige_cache_stats(struct mlxbf_gige *priv)
+{
+       struct mlxbf_gige_stats *p;
+
+       /* Cache stats that will be cleared by clean port operation */
+       p = &priv->stats;
+       p->rx_din_dropped_pkts += readq(priv->base +
+                                       MLXBF_GIGE_RX_DIN_DROP_COUNTER);
+       p->rx_filter_passed_pkts += readq(priv->base +
+                                         MLXBF_GIGE_RX_PASS_COUNTER_ALL);
+       p->rx_filter_discard_pkts += readq(priv->base +
+                                          MLXBF_GIGE_RX_DISC_COUNTER_ALL);
+}
+
+static int mlxbf_gige_clean_port(struct mlxbf_gige *priv)
+{
+       u64 control;
+       u64 temp;
+       int err;
+
+       /* Set the CLEAN_PORT_EN bit to trigger SW reset */
+       control = readq(priv->base + MLXBF_GIGE_CONTROL);
+       control |= MLXBF_GIGE_CONTROL_CLEAN_PORT_EN;
+       writeq(control, priv->base + MLXBF_GIGE_CONTROL);
+
+       /* Ensure completion of "clean port" write before polling status */
+       mb();
+
+       err = readq_poll_timeout_atomic(priv->base + MLXBF_GIGE_STATUS, temp,
+                                       (temp & MLXBF_GIGE_STATUS_READY),
+                                       100, 100000);
+
+       /* Clear the CLEAN_PORT_EN bit at the end of the reset sequence */
+       control = readq(priv->base + MLXBF_GIGE_CONTROL);
+       control &= ~MLXBF_GIGE_CONTROL_CLEAN_PORT_EN;
+       writeq(control, priv->base + MLXBF_GIGE_CONTROL);
+
+       return err;
+}
+
+static int mlxbf_gige_open(struct net_device *netdev)
+{
+       struct mlxbf_gige *priv = netdev_priv(netdev);
+       struct phy_device *phydev = netdev->phydev;
+       u64 int_en;
+       int err;
+
+       err = mlxbf_gige_request_irqs(priv);
+       if (err)
+               return err;
+       mlxbf_gige_cache_stats(priv);
+       err = mlxbf_gige_clean_port(priv);
+       if (err)
+               goto free_irqs;
+       err = mlxbf_gige_rx_init(priv);
+       if (err)
+               goto free_irqs;
+       err = mlxbf_gige_tx_init(priv);
+       if (err)
+               goto rx_deinit;
+
+       phy_start(phydev);
+
+       netif_napi_add(netdev, &priv->napi, mlxbf_gige_poll, NAPI_POLL_WEIGHT);
+       napi_enable(&priv->napi);
+       netif_start_queue(netdev);
+
+       /* Set bits in INT_EN that we care about */
+       int_en = MLXBF_GIGE_INT_EN_HW_ACCESS_ERROR |
+                MLXBF_GIGE_INT_EN_TX_CHECKSUM_INPUTS |
+                MLXBF_GIGE_INT_EN_TX_SMALL_FRAME_SIZE |
+                MLXBF_GIGE_INT_EN_TX_PI_CI_EXCEED_WQ_SIZE |
+                MLXBF_GIGE_INT_EN_SW_CONFIG_ERROR |
+                MLXBF_GIGE_INT_EN_SW_ACCESS_ERROR |
+                MLXBF_GIGE_INT_EN_RX_RECEIVE_PACKET;
+
+       /* Ensure completion of all initialization before enabling interrupts */
+       mb();
+
+       writeq(int_en, priv->base + MLXBF_GIGE_INT_EN);
+
+       return 0;
+
+rx_deinit:
+       mlxbf_gige_rx_deinit(priv);
+
+free_irqs:
+       mlxbf_gige_free_irqs(priv);
+       return err;
+}
+
+static int mlxbf_gige_stop(struct net_device *netdev)
+{
+       struct mlxbf_gige *priv = netdev_priv(netdev);
+
+       writeq(0, priv->base + MLXBF_GIGE_INT_EN);
+       netif_stop_queue(netdev);
+       napi_disable(&priv->napi);
+       netif_napi_del(&priv->napi);
+       mlxbf_gige_free_irqs(priv);
+
+       phy_stop(netdev->phydev);
+
+       mlxbf_gige_rx_deinit(priv);
+       mlxbf_gige_tx_deinit(priv);
+       mlxbf_gige_cache_stats(priv);
+       mlxbf_gige_clean_port(priv);
+
+       return 0;
+}
+
+static int mlxbf_gige_do_ioctl(struct net_device *netdev,
+                              struct ifreq *ifr, int cmd)
+{
+       if (!netif_running(netdev))
+               return -EINVAL;
+
+       return phy_mii_ioctl(netdev->phydev, ifr, cmd);
+}
+
+static void mlxbf_gige_set_rx_mode(struct net_device *netdev)
+{
+       struct mlxbf_gige *priv = netdev_priv(netdev);
+       bool new_promisc_enabled;
+
+       new_promisc_enabled = netdev->flags & IFF_PROMISC;
+
+       /* Only write to the hardware registers if the new setting
+        * of promiscuous mode is different from the current one.
+        */
+       if (new_promisc_enabled != priv->promisc_enabled) {
+               priv->promisc_enabled = new_promisc_enabled;
+
+               if (new_promisc_enabled)
+                       mlxbf_gige_enable_promisc(priv);
+               else
+                       mlxbf_gige_disable_promisc(priv);
+       }
+}
+
+static void mlxbf_gige_get_stats64(struct net_device *netdev,
+                                  struct rtnl_link_stats64 *stats)
+{
+       struct mlxbf_gige *priv = netdev_priv(netdev);
+
+       netdev_stats_to_stats64(stats, &netdev->stats);
+
+       stats->rx_length_errors = priv->stats.rx_truncate_errors;
+       stats->rx_fifo_errors = priv->stats.rx_din_dropped_pkts +
+                               readq(priv->base + MLXBF_GIGE_RX_DIN_DROP_COUNTER);
+       stats->rx_crc_errors = priv->stats.rx_mac_errors;
+       stats->rx_errors = stats->rx_length_errors +
+                          stats->rx_fifo_errors +
+                          stats->rx_crc_errors;
+
+       stats->tx_fifo_errors = priv->stats.tx_fifo_full;
+       stats->tx_errors = stats->tx_fifo_errors;
+}
+
+static const struct net_device_ops mlxbf_gige_netdev_ops = {
+       .ndo_open               = mlxbf_gige_open,
+       .ndo_stop               = mlxbf_gige_stop,
+       .ndo_start_xmit         = mlxbf_gige_start_xmit,
+       .ndo_set_mac_address    = eth_mac_addr,
+       .ndo_validate_addr      = eth_validate_addr,
+       .ndo_do_ioctl           = mlxbf_gige_do_ioctl,
+       .ndo_set_rx_mode        = mlxbf_gige_set_rx_mode,
+       .ndo_get_stats64        = mlxbf_gige_get_stats64,
+};
+
+static void mlxbf_gige_adjust_link(struct net_device *netdev)
+{
+       struct phy_device *phydev = netdev->phydev;
+
+       phy_print_status(phydev);
+}
+
+static int mlxbf_gige_probe(struct platform_device *pdev)
+{
+       struct phy_device *phydev;
+       struct net_device *netdev;
+       struct resource *mac_res;
+       struct resource *llu_res;
+       struct resource *plu_res;
+       struct mlxbf_gige *priv;
+       void __iomem *llu_base;
+       void __iomem *plu_base;
+       void __iomem *base;
+       u64 control;
+       int addr;
+       int err;
+
+       mac_res = platform_get_resource(pdev, IORESOURCE_MEM, MLXBF_GIGE_RES_MAC);
+       if (!mac_res)
+               return -ENXIO;
+
+       base = devm_ioremap_resource(&pdev->dev, mac_res);
+       if (IS_ERR(base))
+               return PTR_ERR(base);
+
+       llu_res = platform_get_resource(pdev, IORESOURCE_MEM, MLXBF_GIGE_RES_LLU);
+       if (!llu_res)
+               return -ENXIO;
+
+       llu_base = devm_ioremap_resource(&pdev->dev, llu_res);
+       if (IS_ERR(llu_base))
+               return PTR_ERR(llu_base);
+
+       plu_res = platform_get_resource(pdev, IORESOURCE_MEM, MLXBF_GIGE_RES_PLU);
+       if (!plu_res)
+               return -ENXIO;
+
+       plu_base = devm_ioremap_resource(&pdev->dev, plu_res);
+       if (IS_ERR(plu_base))
+               return PTR_ERR(plu_base);
+
+       /* Perform general init of GigE block */
+       control = readq(base + MLXBF_GIGE_CONTROL);
+       control |= MLXBF_GIGE_CONTROL_PORT_EN;
+       writeq(control, base + MLXBF_GIGE_CONTROL);
+
+       netdev = devm_alloc_etherdev(&pdev->dev, sizeof(*priv));
+       if (!netdev)
+               return -ENOMEM;
+
+       SET_NETDEV_DEV(netdev, &pdev->dev);
+       netdev->netdev_ops = &mlxbf_gige_netdev_ops;
+       netdev->ethtool_ops = &mlxbf_gige_ethtool_ops;
+       priv = netdev_priv(netdev);
+       priv->netdev = netdev;
+
+       platform_set_drvdata(pdev, priv);
+       priv->dev = &pdev->dev;
+       priv->pdev = pdev;
+
+       spin_lock_init(&priv->lock);
+       spin_lock_init(&priv->gpio_lock);
+
+       /* Attach MDIO device */
+       err = mlxbf_gige_mdio_probe(pdev, priv);
+       if (err)
+               return err;
+
+       err = mlxbf_gige_gpio_init(pdev, priv);
+       if (err) {
+               dev_err(&pdev->dev, "PHY IRQ initialization failed\n");
+               mlxbf_gige_mdio_remove(priv);
+               return -ENODEV;
+       }
+
+       priv->base = base;
+       priv->llu_base = llu_base;
+       priv->plu_base = plu_base;
+
+       priv->rx_q_entries = MLXBF_GIGE_DEFAULT_RXQ_SZ;
+       priv->tx_q_entries = MLXBF_GIGE_DEFAULT_TXQ_SZ;
+
+       /* Write initial MAC address to hardware */
+       mlxbf_gige_initial_mac(priv);
+
+       err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+       if (err) {
+               dev_err(&pdev->dev, "DMA configuration failed: 0x%x\n", err);
+               goto out;
+       }
+
+       priv->error_irq = platform_get_irq(pdev, MLXBF_GIGE_ERROR_INTR_IDX);
+       priv->rx_irq = platform_get_irq(pdev, MLXBF_GIGE_RECEIVE_PKT_INTR_IDX);
+       priv->llu_plu_irq = platform_get_irq(pdev, MLXBF_GIGE_LLU_PLU_INTR_IDX);
+
+       phydev = phy_find_first(priv->mdiobus);
+       if (!phydev) {
+               err = -ENODEV;
+               goto out;
+       }
+
+       addr = phydev->mdio.addr;
+       priv->mdiobus->irq[addr] = priv->phy_irq;
+       phydev->irq = priv->phy_irq;
+
+       err = phy_connect_direct(netdev, phydev,
+                                mlxbf_gige_adjust_link,
+                                PHY_INTERFACE_MODE_GMII);
+       if (err) {
+               dev_err(&pdev->dev, "Could not attach to PHY\n");
+               goto out;
+       }
+
+       /* MAC only supports 1000T full duplex mode */
+       phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
+       phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Full_BIT);
+       phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
+       phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT);
+       phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
+
+       /* Only symmetric pause with flow control enabled is supported so no
+        * need to negotiate pause.
+        */
+       linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydev->advertising);
+       linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, phydev->advertising);
+
+       /* Display information about attached PHY device */
+       phy_attached_info(phydev);
+
+       err = register_netdev(netdev);
+       if (err) {
+               dev_err(&pdev->dev, "Failed to register netdev\n");
+               phy_disconnect(phydev);
+               goto out;
+       }
+
+       return 0;
+
+out:
+       mlxbf_gige_gpio_free(priv);
+       mlxbf_gige_mdio_remove(priv);
+       return err;
+}
+
+static int mlxbf_gige_remove(struct platform_device *pdev)
+{
+       struct mlxbf_gige *priv = platform_get_drvdata(pdev);
+
+       unregister_netdev(priv->netdev);
+       phy_disconnect(priv->netdev->phydev);
+       mlxbf_gige_gpio_free(priv);
+       mlxbf_gige_mdio_remove(priv);
+       platform_set_drvdata(pdev, NULL);
+
+       return 0;
+}
+
+static void mlxbf_gige_shutdown(struct platform_device *pdev)
+{
+       struct mlxbf_gige *priv = platform_get_drvdata(pdev);
+
+       writeq(0, priv->base + MLXBF_GIGE_INT_EN);
+       mlxbf_gige_clean_port(priv);
+}
+
+static const struct acpi_device_id __maybe_unused mlxbf_gige_acpi_match[] = {
+       { "MLNXBF17", 0 },
+       {},
+};
+MODULE_DEVICE_TABLE(acpi, mlxbf_gige_acpi_match);
+
+static struct platform_driver mlxbf_gige_driver = {
+       .probe = mlxbf_gige_probe,
+       .remove = mlxbf_gige_remove,
+       .shutdown = mlxbf_gige_shutdown,
+       .driver = {
+               .name = DRV_NAME,
+               .acpi_match_table = ACPI_PTR(mlxbf_gige_acpi_match),
+       },
+};
+
+module_platform_driver(mlxbf_gige_driver);
+
+MODULE_DESCRIPTION("Mellanox BlueField SoC Gigabit Ethernet Driver");
+MODULE_AUTHOR("David Thompson <davthompson@nvidia.com>");
+MODULE_AUTHOR("Asmaa Mnebhi <asmaa@nvidia.com>");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c
new file mode 100644 (file)
index 0000000..e32dd34
--- /dev/null
@@ -0,0 +1,187 @@
+// SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause
+
+/* MDIO support for Mellanox Gigabit Ethernet driver
+ *
+ * Copyright (C) 2020-2021 NVIDIA CORPORATION & AFFILIATES
+ */
+
+#include <linux/acpi.h>
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/ioport.h>
+#include <linux/irqreturn.h>
+#include <linux/jiffies.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+
+#include "mlxbf_gige.h"
+
+#define MLXBF_GIGE_MDIO_GW_OFFSET      0x0
+#define MLXBF_GIGE_MDIO_CFG_OFFSET     0x4
+
+/* Support clause 22 */
+#define MLXBF_GIGE_MDIO_CL22_ST1       0x1
+#define MLXBF_GIGE_MDIO_CL22_WRITE     0x1
+#define MLXBF_GIGE_MDIO_CL22_READ      0x2
+
+/* Busy bit is set by software and cleared by hardware */
+#define MLXBF_GIGE_MDIO_SET_BUSY       0x1
+
+/* MDIO GW register bits */
+#define MLXBF_GIGE_MDIO_GW_AD_MASK     GENMASK(15, 0)
+#define MLXBF_GIGE_MDIO_GW_DEVAD_MASK  GENMASK(20, 16)
+#define MLXBF_GIGE_MDIO_GW_PARTAD_MASK GENMASK(25, 21)
+#define MLXBF_GIGE_MDIO_GW_OPCODE_MASK GENMASK(27, 26)
+#define MLXBF_GIGE_MDIO_GW_ST1_MASK    GENMASK(28, 28)
+#define MLXBF_GIGE_MDIO_GW_BUSY_MASK   GENMASK(30, 30)
+
+/* MDIO config register bits */
+#define MLXBF_GIGE_MDIO_CFG_MDIO_MODE_MASK             GENMASK(1, 0)
+#define MLXBF_GIGE_MDIO_CFG_MDIO3_3_MASK               GENMASK(2, 2)
+#define MLXBF_GIGE_MDIO_CFG_MDIO_FULL_DRIVE_MASK       GENMASK(4, 4)
+#define MLXBF_GIGE_MDIO_CFG_MDC_PERIOD_MASK            GENMASK(15, 8)
+#define MLXBF_GIGE_MDIO_CFG_MDIO_IN_SAMP_MASK          GENMASK(23, 16)
+#define MLXBF_GIGE_MDIO_CFG_MDIO_OUT_SAMP_MASK         GENMASK(31, 24)
+
+/* Formula for encoding the MDIO period. The encoded value is
+ * passed to the MDIO config register.
+ *
+ * mdc_period = 2 * (val + 1) * i1clk_period
+ *
+ * 400 ns = 2 * (val + 1) * (((1 / 430) * 1000) ns)
+ *
+ * val = (((400 * 430 / 1000) / 2) - 1)
+ */
+#define MLXBF_GIGE_I1CLK_MHZ           430
+#define MLXBF_GIGE_MDC_CLK_NS          400
+
+#define MLXBF_GIGE_MDIO_PERIOD (((MLXBF_GIGE_MDC_CLK_NS * MLXBF_GIGE_I1CLK_MHZ / 1000) / 2) - 1)
+
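+/* Worked example: with the constants above,
+ *	val = ((400 * 430 / 1000) / 2) - 1 = (172 / 2) - 1 = 85
+ * so MLXBF_GIGE_MDIO_PERIOD evaluates to 85 and lands in the
+ * MDC_PERIOD field of the config value below.
+ */
+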
+#define MLXBF_GIGE_MDIO_CFG_VAL (FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDIO_MODE_MASK, 1) | \
+                                FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDIO3_3_MASK, 1) | \
+                                FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDIO_FULL_DRIVE_MASK, 1) | \
+                                FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDC_PERIOD_MASK, \
+                                           MLXBF_GIGE_MDIO_PERIOD) | \
+                                FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDIO_IN_SAMP_MASK, 6) | \
+                                FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDIO_OUT_SAMP_MASK, 13))
+
+static u32 mlxbf_gige_mdio_create_cmd(u16 data, int phy_add,
+                                     int phy_reg, u32 opcode)
+{
+       u32 gw_reg = 0;
+
+       gw_reg |= FIELD_PREP(MLXBF_GIGE_MDIO_GW_AD_MASK, data);
+       gw_reg |= FIELD_PREP(MLXBF_GIGE_MDIO_GW_DEVAD_MASK, phy_reg);
+       gw_reg |= FIELD_PREP(MLXBF_GIGE_MDIO_GW_PARTAD_MASK, phy_add);
+       gw_reg |= FIELD_PREP(MLXBF_GIGE_MDIO_GW_OPCODE_MASK, opcode);
+       gw_reg |= FIELD_PREP(MLXBF_GIGE_MDIO_GW_ST1_MASK,
+                            MLXBF_GIGE_MDIO_CL22_ST1);
+       gw_reg |= FIELD_PREP(MLXBF_GIGE_MDIO_GW_BUSY_MASK,
+                            MLXBF_GIGE_MDIO_SET_BUSY);
+
+       return gw_reg;
+}
+
+static int mlxbf_gige_mdio_read(struct mii_bus *bus, int phy_add, int phy_reg)
+{
+       struct mlxbf_gige *priv = bus->priv;
+       u32 cmd;
+       int ret;
+       u32 val;
+
+       if (phy_reg & MII_ADDR_C45)
+               return -EOPNOTSUPP;
+
+       /* Send mdio read request */
+       cmd = mlxbf_gige_mdio_create_cmd(0, phy_add, phy_reg, MLXBF_GIGE_MDIO_CL22_READ);
+
+       writel(cmd, priv->mdio_io + MLXBF_GIGE_MDIO_GW_OFFSET);
+
+       ret = readl_poll_timeout_atomic(priv->mdio_io + MLXBF_GIGE_MDIO_GW_OFFSET,
+                                       val, !(val & MLXBF_GIGE_MDIO_GW_BUSY_MASK), 100, 1000000);
+
+       if (ret) {
+               writel(0, priv->mdio_io + MLXBF_GIGE_MDIO_GW_OFFSET);
+               return ret;
+       }
+
+       ret = readl(priv->mdio_io + MLXBF_GIGE_MDIO_GW_OFFSET);
+       /* Only return the AD bits of the GW register */
+       ret &= MLXBF_GIGE_MDIO_GW_AD_MASK;
+
+       return ret;
+}
+
+static int mlxbf_gige_mdio_write(struct mii_bus *bus, int phy_add,
+                                int phy_reg, u16 val)
+{
+       struct mlxbf_gige *priv = bus->priv;
+       u32 cmd;
+       int ret;
+       u32 temp;
+
+       if (phy_reg & MII_ADDR_C45)
+               return -EOPNOTSUPP;
+
+       /* Send mdio write request */
+       cmd = mlxbf_gige_mdio_create_cmd(val, phy_add, phy_reg,
+                                        MLXBF_GIGE_MDIO_CL22_WRITE);
+       writel(cmd, priv->mdio_io + MLXBF_GIGE_MDIO_GW_OFFSET);
+
+       /* If the poll timed out, drop the request */
+       ret = readl_poll_timeout_atomic(priv->mdio_io + MLXBF_GIGE_MDIO_GW_OFFSET,
+                                       temp, !(temp & MLXBF_GIGE_MDIO_GW_BUSY_MASK), 100, 1000000);
+
+       return ret;
+}
+
+int mlxbf_gige_mdio_probe(struct platform_device *pdev, struct mlxbf_gige *priv)
+{
+       struct device *dev = &pdev->dev;
+       struct resource *res;
+       int ret;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, MLXBF_GIGE_RES_MDIO9);
+       if (!res)
+               return -ENODEV;
+
+       priv->mdio_io = devm_ioremap_resource(dev, res);
+       if (IS_ERR(priv->mdio_io))
+               return PTR_ERR(priv->mdio_io);
+
+       /* Configure mdio parameters */
+       writel(MLXBF_GIGE_MDIO_CFG_VAL,
+              priv->mdio_io + MLXBF_GIGE_MDIO_CFG_OFFSET);
+
+       priv->mdiobus = devm_mdiobus_alloc(dev);
+       if (!priv->mdiobus) {
+               dev_err(dev, "Failed to alloc MDIO bus\n");
+               return -ENOMEM;
+       }
+
+       priv->mdiobus->name = "mlxbf-mdio";
+       priv->mdiobus->read = mlxbf_gige_mdio_read;
+       priv->mdiobus->write = mlxbf_gige_mdio_write;
+       priv->mdiobus->parent = dev;
+       priv->mdiobus->priv = priv;
+       snprintf(priv->mdiobus->id, MII_BUS_ID_SIZE, "%s",
+                dev_name(dev));
+
+       ret = mdiobus_register(priv->mdiobus);
+       if (ret)
+               dev_err(dev, "Failed to register MDIO bus\n");
+
+       return ret;
+}
+
+void mlxbf_gige_mdio_remove(struct mlxbf_gige *priv)
+{
+       mdiobus_unregister(priv->mdiobus);
+}
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h
new file mode 100644 (file)
index 0000000..5fb33c9
--- /dev/null
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause */
+
+/* Header file for Mellanox BlueField GigE register defines
+ *
+ * Copyright (C) 2020-2021 NVIDIA CORPORATION & AFFILIATES
+ */
+
+#ifndef __MLXBF_GIGE_REGS_H__
+#define __MLXBF_GIGE_REGS_H__
+
+#define MLXBF_GIGE_STATUS                             0x0010
+#define MLXBF_GIGE_STATUS_READY                       BIT(0)
+#define MLXBF_GIGE_INT_STATUS                         0x0028
+#define MLXBF_GIGE_INT_STATUS_RX_RECEIVE_PACKET       BIT(0)
+#define MLXBF_GIGE_INT_STATUS_RX_MAC_ERROR            BIT(1)
+#define MLXBF_GIGE_INT_STATUS_RX_TRN_ERROR            BIT(2)
+#define MLXBF_GIGE_INT_STATUS_SW_ACCESS_ERROR         BIT(3)
+#define MLXBF_GIGE_INT_STATUS_SW_CONFIG_ERROR         BIT(4)
+#define MLXBF_GIGE_INT_STATUS_TX_PI_CI_EXCEED_WQ_SIZE BIT(5)
+#define MLXBF_GIGE_INT_STATUS_TX_SMALL_FRAME_SIZE     BIT(6)
+#define MLXBF_GIGE_INT_STATUS_TX_CHECKSUM_INPUTS      BIT(7)
+#define MLXBF_GIGE_INT_STATUS_HW_ACCESS_ERROR         BIT(8)
+#define MLXBF_GIGE_INT_EN                             0x0030
+#define MLXBF_GIGE_INT_EN_RX_RECEIVE_PACKET           BIT(0)
+#define MLXBF_GIGE_INT_EN_RX_MAC_ERROR                BIT(1)
+#define MLXBF_GIGE_INT_EN_RX_TRN_ERROR                BIT(2)
+#define MLXBF_GIGE_INT_EN_SW_ACCESS_ERROR             BIT(3)
+#define MLXBF_GIGE_INT_EN_SW_CONFIG_ERROR             BIT(4)
+#define MLXBF_GIGE_INT_EN_TX_PI_CI_EXCEED_WQ_SIZE     BIT(5)
+#define MLXBF_GIGE_INT_EN_TX_SMALL_FRAME_SIZE         BIT(6)
+#define MLXBF_GIGE_INT_EN_TX_CHECKSUM_INPUTS          BIT(7)
+#define MLXBF_GIGE_INT_EN_HW_ACCESS_ERROR             BIT(8)
+#define MLXBF_GIGE_INT_MASK                           0x0038
+#define MLXBF_GIGE_INT_MASK_RX_RECEIVE_PACKET         BIT(0)
+#define MLXBF_GIGE_CONTROL                            0x0040
+#define MLXBF_GIGE_CONTROL_PORT_EN                    BIT(0)
+#define MLXBF_GIGE_CONTROL_MAC_ID_RANGE_EN            BIT(1)
+#define MLXBF_GIGE_CONTROL_EN_SPECIFIC_MAC            BIT(4)
+#define MLXBF_GIGE_CONTROL_CLEAN_PORT_EN              BIT(31)
+#define MLXBF_GIGE_RX_WQ_BASE                         0x0200
+#define MLXBF_GIGE_RX_WQE_SIZE_LOG2                   0x0208
+#define MLXBF_GIGE_RX_WQE_SIZE_LOG2_RESET_VAL         7
+#define MLXBF_GIGE_RX_CQ_BASE                         0x0210
+#define MLXBF_GIGE_TX_WQ_BASE                         0x0218
+#define MLXBF_GIGE_TX_WQ_SIZE_LOG2                    0x0220
+#define MLXBF_GIGE_TX_WQ_SIZE_LOG2_RESET_VAL          7
+#define MLXBF_GIGE_TX_CI_UPDATE_ADDRESS               0x0228
+#define MLXBF_GIGE_RX_WQE_PI                          0x0230
+#define MLXBF_GIGE_TX_PRODUCER_INDEX                  0x0238
+#define MLXBF_GIGE_RX_MAC_FILTER                      0x0240
+#define MLXBF_GIGE_RX_MAC_FILTER_STRIDE               0x0008
+#define MLXBF_GIGE_RX_DIN_DROP_COUNTER                0x0260
+#define MLXBF_GIGE_TX_CONSUMER_INDEX                  0x0310
+#define MLXBF_GIGE_TX_CONTROL                         0x0318
+#define MLXBF_GIGE_TX_CONTROL_GRACEFUL_STOP           BIT(0)
+#define MLXBF_GIGE_TX_STATUS                          0x0388
+#define MLXBF_GIGE_TX_STATUS_DATA_FIFO_FULL           BIT(1)
+#define MLXBF_GIGE_RX_MAC_FILTER_DMAC_RANGE_START     0x0520
+#define MLXBF_GIGE_RX_MAC_FILTER_DMAC_RANGE_END       0x0528
+#define MLXBF_GIGE_RX_MAC_FILTER_COUNT_DISC           0x0540
+#define MLXBF_GIGE_RX_MAC_FILTER_COUNT_DISC_EN        BIT(0)
+#define MLXBF_GIGE_RX_MAC_FILTER_COUNT_PASS           0x0548
+#define MLXBF_GIGE_RX_MAC_FILTER_COUNT_PASS_EN        BIT(0)
+#define MLXBF_GIGE_RX_PASS_COUNTER_ALL                0x0550
+#define MLXBF_GIGE_RX_DISC_COUNTER_ALL                0x0560
+#define MLXBF_GIGE_RX                                 0x0578
+#define MLXBF_GIGE_RX_STRIP_CRC_EN                    BIT(1)
+#define MLXBF_GIGE_RX_DMA                             0x0580
+#define MLXBF_GIGE_RX_DMA_EN                          BIT(0)
+#define MLXBF_GIGE_RX_CQE_PACKET_CI                   0x05b0
+#define MLXBF_GIGE_MAC_CFG                            0x05e8
+
+/* NOTE: MLXBF_GIGE_MAC_CFG is the last defined register offset,
+ * so add the size of a single (8-byte) register to it to derive
+ * the total MMIO region size
+ */
+#define MLXBF_GIGE_MMIO_REG_SZ                        (MLXBF_GIGE_MAC_CFG + 8)
+
+#endif /* !defined(__MLXBF_GIGE_REGS_H__) */
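As a usage sketch, MLXBF_GIGE_MMIO_REG_SZ lends itself to a probe-time sanity check that the mapped MMIO window covers every register defined above. The fragment below is illustrative only; pdev and the surrounding probe context are assumptions, not part of this patch:

    /* Hypothetical probe fragment: reject an MMIO region that is too
     * small to contain all defined registers, then map it.
     */
    struct resource *res;

    res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    if (!res || resource_size(res) < MLXBF_GIGE_MMIO_REG_SZ)
            return -EINVAL;

    priv->base = devm_ioremap_resource(&pdev->dev, res);
    if (IS_ERR(priv->base))
            return PTR_ERR(priv->base);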
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c
new file mode 100644 (file)
index 0000000..afa3b92
--- /dev/null
@@ -0,0 +1,320 @@
+// SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause
+
+/* Packet receive logic for Mellanox Gigabit Ethernet driver
+ *
+ * Copyright (C) 2020-2021 NVIDIA CORPORATION & AFFILIATES
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+#include "mlxbf_gige.h"
+#include "mlxbf_gige_regs.h"
+
+void mlxbf_gige_set_mac_rx_filter(struct mlxbf_gige *priv,
+                                 unsigned int index, u64 dmac)
+{
+       void __iomem *base = priv->base;
+       u64 control;
+
+       /* Write destination MAC to specified MAC RX filter */
+       writeq(dmac, base + MLXBF_GIGE_RX_MAC_FILTER +
+              (index * MLXBF_GIGE_RX_MAC_FILTER_STRIDE));
+
+       /* Enable MAC receive filter mask for specified index */
+       control = readq(base + MLXBF_GIGE_CONTROL);
+       control |= (MLXBF_GIGE_CONTROL_EN_SPECIFIC_MAC << index);
+       writeq(control, base + MLXBF_GIGE_CONTROL);
+}
+
+void mlxbf_gige_get_mac_rx_filter(struct mlxbf_gige *priv,
+                                 unsigned int index, u64 *dmac)
+{
+       void __iomem *base = priv->base;
+
+       /* Read destination MAC from specified MAC RX filter */
+       *dmac = readq(base + MLXBF_GIGE_RX_MAC_FILTER +
+                     (index * MLXBF_GIGE_RX_MAC_FILTER_STRIDE));
+}
+
+void mlxbf_gige_enable_promisc(struct mlxbf_gige *priv)
+{
+       void __iomem *base = priv->base;
+       u64 control;
+       u64 end_mac;
+
+       /* Enable MAC_ID_RANGE match functionality */
+       control = readq(base + MLXBF_GIGE_CONTROL);
+       control |= MLXBF_GIGE_CONTROL_MAC_ID_RANGE_EN;
+       writeq(control, base + MLXBF_GIGE_CONTROL);
+
+       /* Set start of destination MAC range check to 0 */
+       writeq(0, base + MLXBF_GIGE_RX_MAC_FILTER_DMAC_RANGE_START);
+
+       /* Set end of destination MAC range check to all FFs */
+       end_mac = BCAST_MAC_ADDR;
+       writeq(end_mac, base + MLXBF_GIGE_RX_MAC_FILTER_DMAC_RANGE_END);
+}
+
+void mlxbf_gige_disable_promisc(struct mlxbf_gige *priv)
+{
+       void __iomem *base = priv->base;
+       u64 control;
+
+       /* Disable MAC_ID_RANGE match functionality */
+       control = readq(base + MLXBF_GIGE_CONTROL);
+       control &= ~MLXBF_GIGE_CONTROL_MAC_ID_RANGE_EN;
+       writeq(control, base + MLXBF_GIGE_CONTROL);
+
+       /* NOTE: no need to change DMAC_RANGE_START or END;
+        * those values are ignored since MAC_ID_RANGE_EN=0
+        */
+}
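These two helpers pair naturally with a netdev's .ndo_set_rx_mode callback. A minimal sketch of that wiring follows; the actual callback lives elsewhere in the driver and is not part of this hunk:

    static void example_set_rx_mode(struct net_device *netdev)
    {
            struct mlxbf_gige *priv = netdev_priv(netdev);

            /* Mirror IFF_PROMISC into the MAC_ID_RANGE match logic */
            if (netdev->flags & IFF_PROMISC)
                    mlxbf_gige_enable_promisc(priv);
            else
                    mlxbf_gige_disable_promisc(priv);
    }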
+
+/* Receive Initialization
+ * 1) Configures RX MAC filters via MMIO registers
+ * 2) Allocates RX WQE array using coherent DMA mapping
+ * 3) Initializes each element of RX WQE array with a receive
+ *    buffer pointer (also using coherent DMA mapping)
+ * 4) Allocates RX CQE array using coherent DMA mapping
+ * 5) Completes other misc receive initialization
+ */
+int mlxbf_gige_rx_init(struct mlxbf_gige *priv)
+{
+       size_t wq_size, cq_size;
+       dma_addr_t *rx_wqe_ptr;
+       dma_addr_t rx_buf_dma;
+       u64 data;
+       int i, j;
+
+       /* Configure MAC RX filter #0 to allow RX of broadcast pkts */
+       mlxbf_gige_set_mac_rx_filter(priv, MLXBF_GIGE_BCAST_MAC_FILTER_IDX,
+                                    BCAST_MAC_ADDR);
+
+       wq_size = MLXBF_GIGE_RX_WQE_SZ * priv->rx_q_entries;
+       priv->rx_wqe_base = dma_alloc_coherent(priv->dev, wq_size,
+                                              &priv->rx_wqe_base_dma,
+                                              GFP_KERNEL);
+       if (!priv->rx_wqe_base)
+               return -ENOMEM;
+
+       /* Initialize 'rx_wqe_ptr' to point to the first RX WQE in the
+        * array.  Each RX WQE is simply a receive buffer pointer, so
+        * walk the entire array, allocating a 2KB buffer for each element.
+        */
+       rx_wqe_ptr = priv->rx_wqe_base;
+
+       for (i = 0; i < priv->rx_q_entries; i++) {
+               priv->rx_skb[i] = mlxbf_gige_alloc_skb(priv, MLXBF_GIGE_DEFAULT_BUF_SZ,
+                                                      &rx_buf_dma, DMA_FROM_DEVICE);
+               if (!priv->rx_skb[i])
+                       goto free_wqe_and_skb;
+               *rx_wqe_ptr++ = rx_buf_dma;
+       }
+
+       /* Write RX WQE base address into MMIO reg */
+       writeq(priv->rx_wqe_base_dma, priv->base + MLXBF_GIGE_RX_WQ_BASE);
+
+       cq_size = MLXBF_GIGE_RX_CQE_SZ * priv->rx_q_entries;
+       priv->rx_cqe_base = dma_alloc_coherent(priv->dev, cq_size,
+                                              &priv->rx_cqe_base_dma,
+                                              GFP_KERNEL);
+       if (!priv->rx_cqe_base)
+               goto free_wqe_and_skb;
+
+       for (i = 0; i < priv->rx_q_entries; i++)
+               priv->rx_cqe_base[i] |= MLXBF_GIGE_RX_CQE_VALID_MASK;
+
+       /* Write RX CQE base address into MMIO reg */
+       writeq(priv->rx_cqe_base_dma, priv->base + MLXBF_GIGE_RX_CQ_BASE);
+
+       /* Write RX_WQE_PI with current number of replenished buffers */
+       writeq(priv->rx_q_entries, priv->base + MLXBF_GIGE_RX_WQE_PI);
+
+       /* Enable removal of CRC during RX */
+       data = readq(priv->base + MLXBF_GIGE_RX);
+       data |= MLXBF_GIGE_RX_STRIP_CRC_EN;
+       writeq(data, priv->base + MLXBF_GIGE_RX);
+
+       /* Enable RX MAC filter pass and discard counters */
+       writeq(MLXBF_GIGE_RX_MAC_FILTER_COUNT_DISC_EN,
+              priv->base + MLXBF_GIGE_RX_MAC_FILTER_COUNT_DISC);
+       writeq(MLXBF_GIGE_RX_MAC_FILTER_COUNT_PASS_EN,
+              priv->base + MLXBF_GIGE_RX_MAC_FILTER_COUNT_PASS);
+
+       /* Clear MLXBF_GIGE_INT_MASK 'receive pkt' bit to
+        * indicate readiness to receive interrupts
+        */
+       data = readq(priv->base + MLXBF_GIGE_INT_MASK);
+       data &= ~MLXBF_GIGE_INT_MASK_RX_RECEIVE_PACKET;
+       writeq(data, priv->base + MLXBF_GIGE_INT_MASK);
+
+       /* Enable RX DMA to write new packets to memory */
+       data = readq(priv->base + MLXBF_GIGE_RX_DMA);
+       data |= MLXBF_GIGE_RX_DMA_EN;
+       writeq(data, priv->base + MLXBF_GIGE_RX_DMA);
+
+       writeq(ilog2(priv->rx_q_entries),
+              priv->base + MLXBF_GIGE_RX_WQE_SIZE_LOG2);
+
+       return 0;
+
+free_wqe_and_skb:
+       rx_wqe_ptr = priv->rx_wqe_base;
+       for (j = 0; j < i; j++) {
+               dma_unmap_single(priv->dev, *rx_wqe_ptr,
+                                MLXBF_GIGE_DEFAULT_BUF_SZ, DMA_FROM_DEVICE);
+               dev_kfree_skb(priv->rx_skb[j]);
+               rx_wqe_ptr++;
+       }
+       dma_free_coherent(priv->dev, wq_size,
+                         priv->rx_wqe_base, priv->rx_wqe_base_dma);
+       return -ENOMEM;
+}
+
+/* Receive Deinitialization
+ * This routine will free allocations done by mlxbf_gige_rx_init(),
+ * namely the RX WQE and RX CQE arrays, as well as all RX buffers
+ */
+void mlxbf_gige_rx_deinit(struct mlxbf_gige *priv)
+{
+       dma_addr_t *rx_wqe_ptr;
+       size_t size;
+       u64 data;
+       int i;
+
+       /* Disable RX DMA to prevent packet transfers to memory */
+       data = readq(priv->base + MLXBF_GIGE_RX_DMA);
+       data &= ~MLXBF_GIGE_RX_DMA_EN;
+       writeq(data, priv->base + MLXBF_GIGE_RX_DMA);
+
+       rx_wqe_ptr = priv->rx_wqe_base;
+
+       for (i = 0; i < priv->rx_q_entries; i++) {
+               dma_unmap_single(priv->dev, *rx_wqe_ptr, MLXBF_GIGE_DEFAULT_BUF_SZ,
+                                DMA_FROM_DEVICE);
+               dev_kfree_skb(priv->rx_skb[i]);
+               rx_wqe_ptr++;
+       }
+
+       size = MLXBF_GIGE_RX_WQE_SZ * priv->rx_q_entries;
+       dma_free_coherent(priv->dev, size,
+                         priv->rx_wqe_base, priv->rx_wqe_base_dma);
+
+       size = MLXBF_GIGE_RX_CQE_SZ * priv->rx_q_entries;
+       dma_free_coherent(priv->dev, size,
+                         priv->rx_cqe_base, priv->rx_cqe_base_dma);
+
+       priv->rx_wqe_base = NULL;
+       priv->rx_wqe_base_dma = 0;
+       priv->rx_cqe_base = NULL;
+       priv->rx_cqe_base_dma = 0;
+       writeq(0, priv->base + MLXBF_GIGE_RX_WQ_BASE);
+       writeq(0, priv->base + MLXBF_GIGE_RX_CQ_BASE);
+}
+
+static bool mlxbf_gige_rx_packet(struct mlxbf_gige *priv, int *rx_pkts)
+{
+       struct net_device *netdev = priv->netdev;
+       struct sk_buff *skb = NULL, *rx_skb;
+       u16 rx_pi_rem, rx_ci_rem;
+       dma_addr_t *rx_wqe_addr;
+       dma_addr_t rx_buf_dma;
+       u64 *rx_cqe_addr;
+       u64 datalen;
+       u64 rx_cqe;
+       u16 rx_ci;
+       u16 rx_pi;
+
+       /* Index into RX buffer array is rx_pi w/wrap based on rx_q_entries */
+       rx_pi = readq(priv->base + MLXBF_GIGE_RX_WQE_PI);
+       rx_pi_rem = rx_pi % priv->rx_q_entries;
+
+       rx_wqe_addr = priv->rx_wqe_base + rx_pi_rem;
+       rx_cqe_addr = priv->rx_cqe_base + rx_pi_rem;
+       rx_cqe = *rx_cqe_addr;
+
+       if ((!!(rx_cqe & MLXBF_GIGE_RX_CQE_VALID_MASK)) != priv->valid_polarity)
+               return false;
+
+       if ((rx_cqe & MLXBF_GIGE_RX_CQE_PKT_STATUS_MASK) == 0) {
+               /* Packet is OK, increment stats */
+               datalen = rx_cqe & MLXBF_GIGE_RX_CQE_PKT_LEN_MASK;
+               netdev->stats.rx_packets++;
+               netdev->stats.rx_bytes += datalen;
+
+               skb = priv->rx_skb[rx_pi_rem];
+
+               skb_put(skb, datalen);
+
+               skb->ip_summed = CHECKSUM_NONE; /* device did not checksum packet */
+
+               skb->protocol = eth_type_trans(skb, netdev);
+
+               /* Alloc another RX SKB for this same index */
+               rx_skb = mlxbf_gige_alloc_skb(priv, MLXBF_GIGE_DEFAULT_BUF_SZ,
+                                             &rx_buf_dma, DMA_FROM_DEVICE);
+               if (!rx_skb)
+                       return false;
+               priv->rx_skb[rx_pi_rem] = rx_skb;
+               dma_unmap_single(priv->dev, *rx_wqe_addr,
+                                MLXBF_GIGE_DEFAULT_BUF_SZ, DMA_FROM_DEVICE);
+               *rx_wqe_addr = rx_buf_dma;
+       } else if (rx_cqe & MLXBF_GIGE_RX_CQE_PKT_STATUS_MAC_ERR) {
+               priv->stats.rx_mac_errors++;
+       } else if (rx_cqe & MLXBF_GIGE_RX_CQE_PKT_STATUS_TRUNCATED) {
+               priv->stats.rx_truncate_errors++;
+       }
+
+       /* Let hardware know we've replenished one buffer */
+       rx_pi++;
+
+       /* Ensure completion of all writes before notifying HW of replenish */
+       wmb();
+       writeq(rx_pi, priv->base + MLXBF_GIGE_RX_WQE_PI);
+
+       (*rx_pkts)++;
+
+       rx_pi_rem = rx_pi % priv->rx_q_entries;
+       if (rx_pi_rem == 0)
+               priv->valid_polarity ^= 1;
+       rx_ci = readq(priv->base + MLXBF_GIGE_RX_CQE_PACKET_CI);
+       rx_ci_rem = rx_ci % priv->rx_q_entries;
+
+       if (skb)
+               netif_receive_skb(skb);
+
+       return rx_pi_rem != rx_ci_rem;
+}
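A note on the ownership check at the top of mlxbf_gige_rx_packet(), derived from the code above (the initial polarity is assumed to start at 0 elsewhere in the driver):

    /* CQE ownership convention, as implemented above:
     *   - a CQE carries a new packet only when its valid bit equals
     *     priv->valid_polarity;
     *   - each time rx_pi wraps (rx_pi_rem == 0) the expected polarity
     *     flips, so CQEs written on the previous lap of the ring, which
     *     still carry the old polarity, are never re-consumed.
     */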
+
+/* Driver poll() function called by NAPI infrastructure */
+int mlxbf_gige_poll(struct napi_struct *napi, int budget)
+{
+       struct mlxbf_gige *priv;
+       bool remaining_pkts;
+       int work_done = 0;
+       u64 data;
+
+       priv = container_of(napi, struct mlxbf_gige, napi);
+
+       mlxbf_gige_handle_tx_complete(priv);
+
+       do {
+               remaining_pkts = mlxbf_gige_rx_packet(priv, &work_done);
+       } while (remaining_pkts && work_done < budget);
+
+       /* If amount of work done < budget, turn off NAPI polling
+        * via napi_complete_done(napi, work_done) and then
+        * re-enable interrupts.
+        */
+       if (work_done < budget && napi_complete_done(napi, work_done)) {
+               /* Clear MLXBF_GIGE_INT_MASK 'receive pkt' bit to
+                * indicate receive readiness
+                */
+               data = readq(priv->base + MLXBF_GIGE_INT_MASK);
+               data &= ~MLXBF_GIGE_INT_MASK_RX_RECEIVE_PACKET;
+               writeq(data, priv->base + MLXBF_GIGE_INT_MASK);
+       }
+
+       return work_done;
+}
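For context, this poll routine would typically be registered in the probe or open path with the standard NAPI API; the exact call site is outside this hunk, so the fragment below is a sketch:

    netif_napi_add(netdev, &priv->napi, mlxbf_gige_poll, NAPI_POLL_WEIGHT);
    napi_enable(&priv->napi);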
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_tx.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_tx.c
new file mode 100644 (file)
index 0000000..04982e8
--- /dev/null
@@ -0,0 +1,284 @@
+// SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause
+
+/* Packet transmit logic for Mellanox Gigabit Ethernet driver
+ *
+ * Copyright (C) 2020-2021 NVIDIA CORPORATION & AFFILIATES
+ */
+
+#include <linux/skbuff.h>
+
+#include "mlxbf_gige.h"
+#include "mlxbf_gige_regs.h"
+
+/* Transmit Initialization
+ * 1) Allocates TX WQE array using coherent DMA mapping
+ * 2) Allocates TX completion counter using coherent DMA mapping
+ */
+int mlxbf_gige_tx_init(struct mlxbf_gige *priv)
+{
+       size_t size;
+
+       size = MLXBF_GIGE_TX_WQE_SZ * priv->tx_q_entries;
+       priv->tx_wqe_base = dma_alloc_coherent(priv->dev, size,
+                                              &priv->tx_wqe_base_dma,
+                                              GFP_KERNEL);
+       if (!priv->tx_wqe_base)
+               return -ENOMEM;
+
+       priv->tx_wqe_next = priv->tx_wqe_base;
+
+       /* Write TX WQE base address into MMIO reg */
+       writeq(priv->tx_wqe_base_dma, priv->base + MLXBF_GIGE_TX_WQ_BASE);
+
+       /* Allocate address for TX completion count */
+       priv->tx_cc = dma_alloc_coherent(priv->dev, MLXBF_GIGE_TX_CC_SZ,
+                                        &priv->tx_cc_dma, GFP_KERNEL);
+       if (!priv->tx_cc) {
+               dma_free_coherent(priv->dev, size,
+                                 priv->tx_wqe_base, priv->tx_wqe_base_dma);
+               return -ENOMEM;
+       }
+
+       /* Write TX CC base address into MMIO reg */
+       writeq(priv->tx_cc_dma, priv->base + MLXBF_GIGE_TX_CI_UPDATE_ADDRESS);
+
+       writeq(ilog2(priv->tx_q_entries),
+              priv->base + MLXBF_GIGE_TX_WQ_SIZE_LOG2);
+
+       priv->prev_tx_ci = 0;
+       priv->tx_pi = 0;
+
+       return 0;
+}
+
+/* Transmit Deinitialization
+ * This routine will free allocations done by mlxbf_gige_tx_init(),
+ * namely the TX WQE array and the TX completion counter
+ */
+void mlxbf_gige_tx_deinit(struct mlxbf_gige *priv)
+{
+       u64 *tx_wqe_addr;
+       size_t size;
+       int i;
+
+       tx_wqe_addr = priv->tx_wqe_base;
+
+       for (i = 0; i < priv->tx_q_entries; i++) {
+               if (priv->tx_skb[i]) {
+                       dma_unmap_single(priv->dev, *tx_wqe_addr,
+                                        priv->tx_skb[i]->len, DMA_TO_DEVICE);
+                       dev_kfree_skb(priv->tx_skb[i]);
+                       priv->tx_skb[i] = NULL;
+               }
+               tx_wqe_addr += 2;
+       }
+
+       size = MLXBF_GIGE_TX_WQE_SZ * priv->tx_q_entries;
+       dma_free_coherent(priv->dev, size,
+                         priv->tx_wqe_base, priv->tx_wqe_base_dma);
+
+       dma_free_coherent(priv->dev, MLXBF_GIGE_TX_CC_SZ,
+                         priv->tx_cc, priv->tx_cc_dma);
+
+       priv->tx_wqe_base = NULL;
+       priv->tx_wqe_base_dma = 0;
+       priv->tx_cc = NULL;
+       priv->tx_cc_dma = 0;
+       priv->tx_wqe_next = NULL;
+       writeq(0, priv->base + MLXBF_GIGE_TX_WQ_BASE);
+       writeq(0, priv->base + MLXBF_GIGE_TX_CI_UPDATE_ADDRESS);
+}
+
+/* Function that returns status of TX ring:
+ *          0: TX ring is full, i.e. there are no
+ *             unused entries left in the TX ring.
+ *   non-zero: TX ring is not full, i.e. there are
+ *             some available entries in the TX ring.
+ *             The non-zero value is a measure of
+ *             how many TX entries are available, but
+ *             it is not the exact number of available
+ *             entries (see below).
+ *
+ * The algorithm assumes that if (prev_tx_ci == tx_pi)
+ * then the TX ring is empty.  An empty ring therefore
+ * reports (tx_q_entries - 1) available entries, which
+ * is what allows the algorithm to differentiate an
+ * empty ring from a full one.
+ */
+static u16 mlxbf_gige_tx_buffs_avail(struct mlxbf_gige *priv)
+{
+       unsigned long flags;
+       u16 avail;
+
+       spin_lock_irqsave(&priv->lock, flags);
+
+       if (priv->prev_tx_ci == priv->tx_pi)
+               avail = priv->tx_q_entries - 1;
+       else
+               avail = ((priv->tx_q_entries + priv->prev_tx_ci - priv->tx_pi)
+                         % priv->tx_q_entries) - 1;
+
+       spin_unlock_irqrestore(&priv->lock, flags);
+
+       return avail;
+}
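A worked example of the availability arithmetic, assuming a 128-entry TX ring (tx_q_entries is a power of two, since ilog2(tx_q_entries) is programmed into the TX_WQ_SIZE_LOG2 register):

    /* tx_q_entries = 128; prev_tx_ci and tx_pi are free-running u16:
     *   prev_tx_ci == tx_pi              -> avail = 127 (empty ring)
     *   prev_tx_ci = 5,      tx_pi = 130 -> ((128 + 5 - 130) % 128) - 1 = 2
     *   prev_tx_ci = 0xFFFE, tx_pi = 1   -> ((128 + 65534 - 1) % 128) - 1 = 124
     * Because 65536 is a multiple of tx_q_entries, the modulo absorbs the
     * u16 wrap, and one slot is always held back to tell full from empty.
     */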
+
+bool mlxbf_gige_handle_tx_complete(struct mlxbf_gige *priv)
+{
+       struct net_device_stats *stats;
+       u16 tx_wqe_index;
+       u64 *tx_wqe_addr;
+       u64 tx_status;
+       u16 tx_ci;
+
+       tx_status = readq(priv->base + MLXBF_GIGE_TX_STATUS);
+       if (tx_status & MLXBF_GIGE_TX_STATUS_DATA_FIFO_FULL)
+               priv->stats.tx_fifo_full++;
+       tx_ci = readq(priv->base + MLXBF_GIGE_TX_CONSUMER_INDEX);
+       stats = &priv->netdev->stats;
+
+       /* Transmit completion logic needs to loop until the completion
+        * index (in SW) equals TX consumer index (from HW).  These
+        * parameters are unsigned 16-bit values and the wrap case needs
+        * to be supported, that is TX consumer index wrapped from 0xFFFF
+        * to 0 while TX completion index is still < 0xFFFF.
+        */
+       for (; priv->prev_tx_ci != tx_ci; priv->prev_tx_ci++) {
+               tx_wqe_index = priv->prev_tx_ci % priv->tx_q_entries;
+               /* Each TX WQE is 16 bytes: the first 64-bit word holds
+                * the 2KB TX buffer address and the second word holds
+                * length and other metadata for the WQE.
+                */
+               tx_wqe_addr = priv->tx_wqe_base +
+                              (tx_wqe_index * MLXBF_GIGE_TX_WQE_SZ_QWORDS);
+
+               stats->tx_packets++;
+               stats->tx_bytes += MLXBF_GIGE_TX_WQE_PKT_LEN(tx_wqe_addr);
+
+               dma_unmap_single(priv->dev, *tx_wqe_addr,
+                                priv->tx_skb[tx_wqe_index]->len, DMA_TO_DEVICE);
+               dev_consume_skb_any(priv->tx_skb[tx_wqe_index]);
+               priv->tx_skb[tx_wqe_index] = NULL;
+
+               /* Ensure completion of updates across all cores */
+               mb();
+       }
+
+       /* Since the TX ring was likely just drained, check if TX queue
+        * had previously been stopped and now that there are TX buffers
+        * available the TX queue can be awakened.
+        */
+       if (netif_queue_stopped(priv->netdev) &&
+           mlxbf_gige_tx_buffs_avail(priv))
+               netif_wake_queue(priv->netdev);
+
+       return true;
+}
+
+/* Function to advance the tx_wqe_next pointer to next TX WQE */
+void mlxbf_gige_update_tx_wqe_next(struct mlxbf_gige *priv)
+{
+       /* Advance tx_wqe_next pointer */
+       priv->tx_wqe_next += MLXBF_GIGE_TX_WQE_SZ_QWORDS;
+
+       /* Check if 'next' pointer is beyond end of TX ring */
+       /* If so, set 'next' back to 'base' pointer of ring */
+       if (priv->tx_wqe_next == (priv->tx_wqe_base +
+                                 (priv->tx_q_entries * MLXBF_GIGE_TX_WQE_SZ_QWORDS)))
+               priv->tx_wqe_next = priv->tx_wqe_base;
+}
+
+netdev_tx_t mlxbf_gige_start_xmit(struct sk_buff *skb,
+                                 struct net_device *netdev)
+{
+       struct mlxbf_gige *priv = netdev_priv(netdev);
+       long buff_addr, start_dma_page, end_dma_page;
+       struct sk_buff *tx_skb;
+       dma_addr_t tx_buf_dma;
+       unsigned long flags;
+       u64 *tx_wqe_addr;
+       u64 word2;
+
+       /* If needed, linearize TX SKB as hardware DMA expects this */
+       if (skb->len > MLXBF_GIGE_DEFAULT_BUF_SZ || skb_linearize(skb)) {
+               dev_kfree_skb(skb);
+               netdev->stats.tx_dropped++;
+               return NETDEV_TX_OK;
+       }
+
+       buff_addr = (long)skb->data;
+       start_dma_page = buff_addr >> MLXBF_GIGE_DMA_PAGE_SHIFT;
+       end_dma_page   = (buff_addr + skb->len - 1) >> MLXBF_GIGE_DMA_PAGE_SHIFT;
+
+       /* Verify that payload pointer and data length of SKB to be
+        * transmitted does not violate the hardware DMA limitation.
+        */
+       if (start_dma_page != end_dma_page) {
+               /* DMA operation would fail as-is, alloc new aligned SKB */
+               tx_skb = mlxbf_gige_alloc_skb(priv, skb->len,
+                                             &tx_buf_dma, DMA_TO_DEVICE);
+               if (!tx_skb) {
+                       /* Free original skb, could not alloc new aligned SKB */
+                       dev_kfree_skb(skb);
+                       netdev->stats.tx_dropped++;
+                       return NETDEV_TX_OK;
+               }
+
+               skb_put_data(tx_skb, skb->data, skb->len);
+
+               /* Free the original SKB */
+               dev_kfree_skb(skb);
+       } else {
+               tx_skb = skb;
+               tx_buf_dma = dma_map_single(priv->dev, skb->data,
+                                           skb->len, DMA_TO_DEVICE);
+               if (dma_mapping_error(priv->dev, tx_buf_dma)) {
+                       dev_kfree_skb(skb);
+                       netdev->stats.tx_dropped++;
+                       return NETDEV_TX_OK;
+               }
+       }
+
+       /* Get address of TX WQE */
+       tx_wqe_addr = priv->tx_wqe_next;
+
+       mlxbf_gige_update_tx_wqe_next(priv);
+
+       /* Put PA of buffer address into first 64-bit word of TX WQE */
+       *tx_wqe_addr = tx_buf_dma;
+
+       /* Set TX WQE pkt_len appropriately
+        * NOTE: GigE silicon will automatically pad up to
+        *       minimum packet length if needed.
+        */
+       word2 = tx_skb->len & MLXBF_GIGE_TX_WQE_PKT_LEN_MASK;
+
+       /* Write entire 2nd word of TX WQE */
+       *(tx_wqe_addr + 1) = word2;
+
+       spin_lock_irqsave(&priv->lock, flags);
+       priv->tx_skb[priv->tx_pi % priv->tx_q_entries] = tx_skb;
+       priv->tx_pi++;
+       spin_unlock_irqrestore(&priv->lock, flags);
+
+       if (!netdev_xmit_more()) {
+               /* Create memory barrier before write to TX PI */
+               wmb();
+               writeq(priv->tx_pi, priv->base + MLXBF_GIGE_TX_PRODUCER_INDEX);
+       }
+
+       /* Check if the last TX entry was just used */
+       if (!mlxbf_gige_tx_buffs_avail(priv)) {
+               /* TX ring is full, inform stack */
+               netif_stop_queue(netdev);
+
+               /* Since there is no separate "TX complete" interrupt, need
+                * to explicitly schedule NAPI poll.  This will trigger logic
+                * which processes TX completions, and will hopefully drain
+                * the TX ring allowing the TX queue to be awakened.
+                */
+               napi_schedule(&priv->napi);
+       }
+
+       return NETDEV_TX_OK;
+}
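To make the DMA page check in mlxbf_gige_start_xmit() concrete, assume MLXBF_GIGE_DMA_PAGE_SHIFT is 12 (a 4 KB DMA page; the constant is defined elsewhere in the driver, so this value is an assumption here):

    /* skb->data = 0x2ff8, skb->len = 0x20:
     *   start_dma_page = 0x2ff8 >> 12              = 2
     *   end_dma_page   = (0x2ff8 + 0x20 - 1) >> 12 = 3
     * The payload straddles a DMA page boundary, so a fresh aligned SKB
     * is allocated and the data copied; otherwise the SKB maps as-is.
     */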
index 4a0dbdb..3713c45 100644 (file)
@@ -26,8 +26,8 @@ struct mlxsw_env {
 static int mlxsw_env_validate_cable_ident(struct mlxsw_core *core, int id,
                                          bool *qsfp, bool *cmis)
 {
-       char eeprom_tmp[MLXSW_REG_MCIA_EEPROM_SIZE];
        char mcia_pl[MLXSW_REG_MCIA_LEN];
+       char *eeprom_tmp;
        u8 ident;
        int err;
 
@@ -36,7 +36,7 @@ static int mlxsw_env_validate_cable_ident(struct mlxsw_core *core, int id,
        err = mlxsw_reg_query(core, MLXSW_REG(mcia), mcia_pl);
        if (err)
                return err;
-       mlxsw_reg_mcia_eeprom_memcpy_from(mcia_pl, eeprom_tmp);
+       eeprom_tmp = mlxsw_reg_mcia_eeprom_data(mcia_pl);
        ident = eeprom_tmp[0];
        *cmis = false;
        switch (ident) {
@@ -64,8 +64,8 @@ mlxsw_env_query_module_eeprom(struct mlxsw_core *mlxsw_core, int module,
                              u16 offset, u16 size, void *data,
                              bool qsfp, unsigned int *p_read_size)
 {
-       char eeprom_tmp[MLXSW_REG_MCIA_EEPROM_SIZE];
        char mcia_pl[MLXSW_REG_MCIA_LEN];
+       char *eeprom_tmp;
        u16 i2c_addr;
        u8 page = 0;
        int status;
@@ -116,7 +116,7 @@ mlxsw_env_query_module_eeprom(struct mlxsw_core *mlxsw_core, int module,
        if (status)
                return -EIO;
 
-       mlxsw_reg_mcia_eeprom_memcpy_from(mcia_pl, eeprom_tmp);
+       eeprom_tmp = mlxsw_reg_mcia_eeprom_data(mcia_pl);
        memcpy(data, eeprom_tmp, size);
        *p_read_size = size;
 
@@ -127,13 +127,13 @@ int mlxsw_env_module_temp_thresholds_get(struct mlxsw_core *core, int module,
                                         int off, int *temp)
 {
        unsigned int module_temp, module_crit, module_emerg;
-       char eeprom_tmp[MLXSW_REG_MCIA_EEPROM_SIZE];
        union {
                u8 buf[MLXSW_REG_MCIA_TH_ITEM_SIZE];
                u16 temp;
        } temp_thresh;
        char mcia_pl[MLXSW_REG_MCIA_LEN] = {0};
        char mtmp_pl[MLXSW_REG_MTMP_LEN];
+       char *eeprom_tmp;
        bool qsfp, cmis;
        int page;
        int err;
@@ -195,7 +195,7 @@ int mlxsw_env_module_temp_thresholds_get(struct mlxsw_core *core, int module,
        if (err)
                return err;
 
-       mlxsw_reg_mcia_eeprom_memcpy_from(mcia_pl, eeprom_tmp);
+       eeprom_tmp = mlxsw_reg_mcia_eeprom_data(mcia_pl);
        memcpy(temp_thresh.buf, eeprom_tmp, MLXSW_REG_MCIA_TH_ITEM_SIZE);
        *temp = temp_thresh.temp * 1000;
 
@@ -357,8 +357,8 @@ mlxsw_env_get_module_eeprom_by_page(struct mlxsw_core *mlxsw_core, u8 module,
        device_addr = page->offset;
 
        while (bytes_read < page->length) {
-               char eeprom_tmp[MLXSW_REG_MCIA_EEPROM_SIZE];
                char mcia_pl[MLXSW_REG_MCIA_LEN];
+               char *eeprom_tmp;
                u8 size;
                int err;
 
@@ -380,7 +380,7 @@ mlxsw_env_get_module_eeprom_by_page(struct mlxsw_core *mlxsw_core, u8 module,
                if (err)
                        return err;
 
-               mlxsw_reg_mcia_eeprom_memcpy_from(mcia_pl, eeprom_tmp);
+               eeprom_tmp = mlxsw_reg_mcia_eeprom_data(mcia_pl);
                memcpy(page->data + bytes_read, eeprom_tmp, size);
                bytes_read += size;
        }
index d0f6dfe..d54aa16 100644 (file)
@@ -54,4 +54,6 @@ config LAN743X
          To compile this driver as a module, choose M here. The module will be
          called lan743x.
 
+source "drivers/net/ethernet/microchip/sparx5/Kconfig"
+
 endif # NET_VENDOR_MICROCHIP
index da60354..c77dc03 100644 (file)
@@ -8,3 +8,5 @@ obj-$(CONFIG_ENCX24J600) += encx24j600.o encx24j600-regmap.o
 obj-$(CONFIG_LAN743X) += lan743x.o
 
 lan743x-objs := lan743x_main.o lan743x_ethtool.o lan743x_ptp.o
+
+obj-$(CONFIG_SPARX5_SWITCH) += sparx5/
diff --git a/drivers/net/ethernet/microchip/sparx5/Kconfig b/drivers/net/ethernet/microchip/sparx5/Kconfig
new file mode 100644 (file)
index 0000000..a80419d
--- /dev/null
@@ -0,0 +1,9 @@
+config SPARX5_SWITCH
+       tristate "Sparx5 switch driver"
+       depends on NET_SWITCHDEV
+       depends on HAS_IOMEM
+       select PHYLINK
+       select PHY_SPARX5_SERDES
+       select RESET_CONTROLLER
+       help
+         This driver supports the Sparx5 network switch device.
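An illustrative .config fragment for building the driver as a module (the selected symbols such as PHYLINK and PHY_SPARX5_SERDES are pulled in automatically by the select statements above):

    CONFIG_NET_SWITCHDEV=y
    CONFIG_SPARX5_SWITCH=m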
diff --git a/drivers/net/ethernet/microchip/sparx5/Makefile b/drivers/net/ethernet/microchip/sparx5/Makefile
new file mode 100644 (file)
index 0000000..faa8f07
--- /dev/null
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for the Microchip Sparx5 network device drivers.
+#
+
+obj-$(CONFIG_SPARX5_SWITCH) += sparx5-switch.o
+
+sparx5-switch-objs  := sparx5_main.o sparx5_packet.o \
+ sparx5_netdev.o sparx5_phylink.o sparx5_port.o sparx5_mactable.o sparx5_vlan.o \
+ sparx5_switchdev.o sparx5_calendar.o sparx5_ethtool.o
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_calendar.c b/drivers/net/ethernet/microchip/sparx5/sparx5_calendar.c
new file mode 100644 (file)
index 0000000..76a8bb5
--- /dev/null
@@ -0,0 +1,596 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip Sparx5 Switch driver
+ *
+ * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+
+#include "sparx5_main_regs.h"
+#include "sparx5_main.h"
+
+/* QSYS calendar information */
+#define SPX5_PORTS_PER_CALREG          10  /* Ports mapped in a calendar register */
+#define SPX5_CALBITS_PER_PORT          3   /* Bits per port in calendar register */
+
+/* DSM calendar information */
+#define SPX5_DSM_CAL_LEN               64
+#define SPX5_DSM_CAL_EMPTY             0xFFFF
+#define SPX5_DSM_CAL_MAX_DEVS_PER_TAXI 13
+#define SPX5_DSM_CAL_TAXIS             8
+#define SPX5_DSM_CAL_BW_LOSS           553
+
+#define SPX5_TAXI_PORT_MAX             70
+
+#define SPEED_12500                    12500
+
+/* Maps from taxis to port numbers */
+static u32 sparx5_taxi_ports[SPX5_DSM_CAL_TAXIS][SPX5_DSM_CAL_MAX_DEVS_PER_TAXI] = {
+       {57, 12, 0, 1, 2, 16, 17, 18, 19, 20, 21, 22, 23},
+       {58, 13, 3, 4, 5, 24, 25, 26, 27, 28, 29, 30, 31},
+       {59, 14, 6, 7, 8, 32, 33, 34, 35, 36, 37, 38, 39},
+       {60, 15, 9, 10, 11, 40, 41, 42, 43, 44, 45, 46, 47},
+       {61, 48, 49, 50, 99, 99, 99, 99, 99, 99, 99, 99, 99},
+       {62, 51, 52, 53, 99, 99, 99, 99, 99, 99, 99, 99, 99},
+       {56, 63, 54, 55, 99, 99, 99, 99, 99, 99, 99, 99, 99},
+       {64, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99},
+};
+
+struct sparx5_calendar_data {
+       u32 schedule[SPX5_DSM_CAL_LEN];
+       u32 avg_dist[SPX5_DSM_CAL_MAX_DEVS_PER_TAXI];
+       u32 taxi_ports[SPX5_DSM_CAL_MAX_DEVS_PER_TAXI];
+       u32 taxi_speeds[SPX5_DSM_CAL_MAX_DEVS_PER_TAXI];
+       u32 dev_slots[SPX5_DSM_CAL_MAX_DEVS_PER_TAXI];
+       u32 new_slots[SPX5_DSM_CAL_LEN];
+       u32 temp_sched[SPX5_DSM_CAL_LEN];
+       u32 indices[SPX5_DSM_CAL_LEN];
+       u32 short_list[SPX5_DSM_CAL_LEN];
+       u32 long_list[SPX5_DSM_CAL_LEN];
+};
+
+static u32 sparx5_target_bandwidth(struct sparx5 *sparx5)
+{
+       switch (sparx5->target_ct) {
+       case SPX5_TARGET_CT_7546:
+       case SPX5_TARGET_CT_7546TSN:
+               return 65000;
+       case SPX5_TARGET_CT_7549:
+       case SPX5_TARGET_CT_7549TSN:
+               return 91000;
+       case SPX5_TARGET_CT_7552:
+       case SPX5_TARGET_CT_7552TSN:
+               return 129000;
+       case SPX5_TARGET_CT_7556:
+       case SPX5_TARGET_CT_7556TSN:
+               return 161000;
+       case SPX5_TARGET_CT_7558:
+       case SPX5_TARGET_CT_7558TSN:
+               return 201000;
+       default:
+               return 0;
+       }
+}
+
+/* This is used in calendar configuration */
+enum sparx5_cal_bw {
+       SPX5_CAL_SPEED_NONE = 0,
+       SPX5_CAL_SPEED_1G   = 1,
+       SPX5_CAL_SPEED_2G5  = 2,
+       SPX5_CAL_SPEED_5G   = 3,
+       SPX5_CAL_SPEED_10G  = 4,
+       SPX5_CAL_SPEED_25G  = 5,
+       SPX5_CAL_SPEED_0G5  = 6,
+       SPX5_CAL_SPEED_12G5 = 7
+};
+
+static u32 sparx5_clk_to_bandwidth(enum sparx5_core_clockfreq cclock)
+{
+       switch (cclock) {
+       case SPX5_CORE_CLOCK_250MHZ: return 83000;  /* 250000 / 3 */
+       case SPX5_CORE_CLOCK_500MHZ: return 166000; /* 500000 / 3 */
+       case SPX5_CORE_CLOCK_625MHZ: return 208000; /* 625000 / 3 */
+       default: return 0;
+       }
+}
+
+static u32 sparx5_cal_speed_to_value(enum sparx5_cal_bw speed)
+{
+       switch (speed) {
+       case SPX5_CAL_SPEED_1G:   return 1000;
+       case SPX5_CAL_SPEED_2G5:  return 2500;
+       case SPX5_CAL_SPEED_5G:   return 5000;
+       case SPX5_CAL_SPEED_10G:  return 10000;
+       case SPX5_CAL_SPEED_25G:  return 25000;
+       case SPX5_CAL_SPEED_0G5:  return 500;
+       case SPX5_CAL_SPEED_12G5: return 12500;
+       default: return 0;
+       }
+}
+
+static u32 sparx5_bandwidth_to_calendar(u32 bw)
+{
+       switch (bw) {
+       case SPEED_10:      return SPX5_CAL_SPEED_0G5;
+       case SPEED_100:     return SPX5_CAL_SPEED_0G5;
+       case SPEED_1000:    return SPX5_CAL_SPEED_1G;
+       case SPEED_2500:    return SPX5_CAL_SPEED_2G5;
+       case SPEED_5000:    return SPX5_CAL_SPEED_5G;
+       case SPEED_10000:   return SPX5_CAL_SPEED_10G;
+       case SPEED_12500:   return SPX5_CAL_SPEED_12G5;
+       case SPEED_25000:   return SPX5_CAL_SPEED_25G;
+       case SPEED_UNKNOWN: return SPX5_CAL_SPEED_1G;
+       default:            return SPX5_CAL_SPEED_NONE;
+       }
+}
+
+static enum sparx5_cal_bw sparx5_get_port_cal_speed(struct sparx5 *sparx5,
+                                                   u32 portno)
+{
+       struct sparx5_port *port;
+
+       if (portno >= SPX5_PORTS) {
+               /* Internal ports */
+               if (portno == SPX5_PORT_CPU_0 || portno == SPX5_PORT_CPU_1) {
+                       /* Equals 1.25G */
+                       return SPX5_CAL_SPEED_2G5;
+               } else if (portno == SPX5_PORT_VD0) {
+                       /* IPMC only idle BW */
+                       return SPX5_CAL_SPEED_NONE;
+               } else if (portno == SPX5_PORT_VD1) {
+                       /* OAM only idle BW */
+                       return SPX5_CAL_SPEED_NONE;
+               } else if (portno == SPX5_PORT_VD2) {
+                       /* IPinIP gets only idle BW */
+                       return SPX5_CAL_SPEED_NONE;
+               }
+               /* not in port map */
+               return SPX5_CAL_SPEED_NONE;
+       }
+       /* Front ports - may be used */
+       port = sparx5->ports[portno];
+       if (!port)
+               return SPX5_CAL_SPEED_NONE;
+       return sparx5_bandwidth_to_calendar(port->conf.bandwidth);
+}
+
+/* Auto configure the QSYS calendar based on port configuration */
+int sparx5_config_auto_calendar(struct sparx5 *sparx5)
+{
+       u32 cal[7], value, idx, portno;
+       u32 max_core_bw;
+       u32 total_bw = 0, used_port_bw = 0;
+       int err = 0;
+       enum sparx5_cal_bw spd;
+
+       memset(cal, 0, sizeof(cal));
+
+       max_core_bw = sparx5_clk_to_bandwidth(sparx5->coreclock);
+       if (max_core_bw == 0) {
+               dev_err(sparx5->dev, "Core clock not supported");
+               return -EINVAL;
+       }
+
+       /* Setup the calendar with the bandwidth to each port */
+       for (portno = 0; portno < SPX5_PORTS_ALL; portno++) {
+               u64 reg, offset, this_bw;
+
+               spd = sparx5_get_port_cal_speed(sparx5, portno);
+               if (spd == SPX5_CAL_SPEED_NONE)
+                       continue;
+
+               this_bw = sparx5_cal_speed_to_value(spd);
+               if (portno < SPX5_PORTS)
+                       used_port_bw += this_bw;
+               else
+                       /* Internal ports are granted half the value */
+                       this_bw = this_bw / 2;
+               total_bw += this_bw;
+               reg = portno;
+               offset = do_div(reg, SPX5_PORTS_PER_CALREG);
+               cal[reg] |= spd << (offset * SPX5_CALBITS_PER_PORT);
+       }
+
+       if (used_port_bw > sparx5_target_bandwidth(sparx5)) {
+               dev_err(sparx5->dev,
+                       "Port BW %u above target BW %u\n",
+                       used_port_bw, sparx5_target_bandwidth(sparx5));
+               return -EINVAL;
+       }
+
+       if (total_bw > max_core_bw) {
+               dev_err(sparx5->dev,
+                       "Total BW %u above switch core BW %u\n",
+                       total_bw, max_core_bw);
+               return -EINVAL;
+       }
+
+       /* Halt the calendar while changing it */
+       spx5_rmw(QSYS_CAL_CTRL_CAL_MODE_SET(10),
+                QSYS_CAL_CTRL_CAL_MODE,
+                sparx5, QSYS_CAL_CTRL);
+
+       /* Assign port bandwidth to auto calendar */
+       for (idx = 0; idx < ARRAY_SIZE(cal); idx++)
+               spx5_wr(cal[idx], sparx5, QSYS_CAL_AUTO(idx));
+
+       /* Increase grant rate of all ports to account for
+        * core clock ppm deviations
+        */
+       spx5_rmw(QSYS_CAL_CTRL_CAL_AUTO_GRANT_RATE_SET(671), /* default 672, reduced to 671 */
+                QSYS_CAL_CTRL_CAL_AUTO_GRANT_RATE,
+                sparx5,
+                QSYS_CAL_CTRL);
+
+       /* Grant idle usage to VD 0-2 */
+       for (idx = 2; idx < 5; idx++)
+               spx5_wr(HSCH_OUTB_SHARE_ENA_OUTB_SHARE_ENA_SET(12),
+                       sparx5,
+                       HSCH_OUTB_SHARE_ENA(idx));
+
+       /* Enable Auto mode */
+       spx5_rmw(QSYS_CAL_CTRL_CAL_MODE_SET(8),
+                QSYS_CAL_CTRL_CAL_MODE,
+                sparx5, QSYS_CAL_CTRL);
+
+       /* Verify successful calendar config */
+       value = spx5_rd(sparx5, QSYS_CAL_CTRL);
+       if (QSYS_CAL_CTRL_CAL_AUTO_ERROR_GET(value)) {
+               dev_err(sparx5->dev, "QSYS calendar error\n");
+               err = -EINVAL;
+       }
+       return err;
+}
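A worked example of the 3-bit packing loop in sparx5_config_auto_calendar():

    /* SPX5_PORTS_PER_CALREG = 10, SPX5_CALBITS_PER_PORT = 3.
     * For portno = 23: do_div() yields reg = 23 / 10 = 2 and
     * offset = 23 % 10 = 3, so the speed code for port 23 lands in
     * bits [11:9] of QSYS_CAL_AUTO(2).
     */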
+
+static u32 sparx5_dsm_exb_gcd(u32 a, u32 b)
+{
+       if (b == 0)
+               return a;
+       return sparx5_dsm_exb_gcd(b, a % b);
+}
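This is the classic recursive Euclid GCD; folded over a taxi's port speeds it yields the largest common speed quantum. For example:

    /* speeds {10000, 25000, 1000}, zeros ignored since gcd(x, 0) = x:
     *   gcd(10000, 25000) = 5000
     *   gcd(5000, 1000)   = 1000
     * so DSM slots can be sized in units of 1000 Mbps.
     */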
+
+static u32 sparx5_dsm_cal_len(u32 *cal)
+{
+       u32 idx = 0, len = 0;
+
+       while (idx < SPX5_DSM_CAL_LEN) {
+               if (cal[idx] != SPX5_DSM_CAL_EMPTY)
+                       len++;
+               idx++;
+       }
+       return len;
+}
+
+static u32 sparx5_dsm_cp_cal(u32 *sched)
+{
+       u32 idx = 0, tmp;
+
+       while (idx < SPX5_DSM_CAL_LEN) {
+               if (sched[idx] != SPX5_DSM_CAL_EMPTY) {
+                       tmp = sched[idx];
+                       sched[idx] = SPX5_DSM_CAL_EMPTY;
+                       return tmp;
+               }
+               idx++;
+       }
+       return SPX5_DSM_CAL_EMPTY;
+}
+
+static int sparx5_dsm_calendar_calc(struct sparx5 *sparx5, u32 taxi,
+                                   struct sparx5_calendar_data *data)
+{
+       bool slow_mode;
+       u32 gcd, idx, sum, min, factor;
+       u32 num_of_slots, slot_spd, empty_slots;
+       u32 taxi_bw, clk_period_ps;
+
+       clk_period_ps = sparx5_clk_period(sparx5->coreclock);
+       taxi_bw = 128 * 1000000 / clk_period_ps;
+       slow_mode = clk_period_ps > 2000;
+       memcpy(data->taxi_ports, &sparx5_taxi_ports[taxi],
+              sizeof(data->taxi_ports));
+
+       for (idx = 0; idx < SPX5_DSM_CAL_LEN; idx++) {
+               data->new_slots[idx] = SPX5_DSM_CAL_EMPTY;
+               data->schedule[idx] = SPX5_DSM_CAL_EMPTY;
+               data->temp_sched[idx] = SPX5_DSM_CAL_EMPTY;
+       }
+       /* Default empty calendar */
+       data->schedule[0] = SPX5_DSM_CAL_MAX_DEVS_PER_TAXI;
+
+       /* Map ports to taxi positions */
+       for (idx = 0; idx < SPX5_DSM_CAL_MAX_DEVS_PER_TAXI; idx++) {
+               u32 portno = data->taxi_ports[idx];
+
+               if (portno < SPX5_TAXI_PORT_MAX) {
+                       data->taxi_speeds[idx] = sparx5_cal_speed_to_value
+                               (sparx5_get_port_cal_speed(sparx5, portno));
+               } else {
+                       data->taxi_speeds[idx] = 0;
+               }
+       }
+
+       sum = 0;
+       min = 25000;
+       for (idx = 0; idx < ARRAY_SIZE(data->taxi_speeds); idx++) {
+               u32 jdx;
+
+               sum += data->taxi_speeds[idx];
+               if (data->taxi_speeds[idx] && data->taxi_speeds[idx] < min)
+                       min = data->taxi_speeds[idx];
+               gcd = min;
+               for (jdx = 0; jdx < ARRAY_SIZE(data->taxi_speeds); jdx++)
+                       gcd = sparx5_dsm_exb_gcd(gcd, data->taxi_speeds[jdx]);
+       }
+       if (sum == 0) /* Empty calendar */
+               return 0;
+       /* Make room for overhead traffic */
+       factor = 100 * 100 * 1000 / (100 * 100 - SPX5_DSM_CAL_BW_LOSS);
+
+       if (sum * factor > (taxi_bw * 1000)) {
+               dev_err(sparx5->dev,
+                       "Taxi %u, Requested BW %u above available BW %u\n",
+                       taxi, sum, taxi_bw);
+               return -EINVAL;
+       }
+       for (idx = 0; idx < 4; idx++) {
+               u32 raw_spd;
+
+               if (idx == 0)
+                       raw_spd = gcd / 5;
+               else if (idx == 1)
+                       raw_spd = gcd / 2;
+               else if (idx == 2)
+                       raw_spd = gcd;
+               else
+                       raw_spd = min;
+               slot_spd = raw_spd * factor / 1000;
+               num_of_slots = taxi_bw / slot_spd;
+               if (num_of_slots <= 64)
+                       break;
+       }
+
+       num_of_slots = num_of_slots > 64 ? 64 : num_of_slots;
+       slot_spd = taxi_bw / num_of_slots;
+
+       sum = 0;
+       for (idx = 0; idx < ARRAY_SIZE(data->taxi_speeds); idx++) {
+               u32 spd = data->taxi_speeds[idx];
+               u32 adjusted_speed = data->taxi_speeds[idx] * factor / 1000;
+
+               if (adjusted_speed > 0) {
+                       data->avg_dist[idx] = (128 * 1000000 * 10) /
+                               (adjusted_speed * clk_period_ps);
+               } else {
+                       data->avg_dist[idx] = -1;
+               }
+               data->dev_slots[idx] = ((spd * factor / slot_spd) + 999) / 1000;
+               if (spd != 25000 && (spd != 10000 || !slow_mode)) {
+                       if (num_of_slots < (5 * data->dev_slots[idx])) {
+                               dev_err(sparx5->dev,
+                                       "Taxi %u, speed %u, Low slot sep.\n",
+                                       taxi, spd);
+                               return -EINVAL;
+                       }
+               }
+               sum += data->dev_slots[idx];
+               if (sum > num_of_slots) {
+                       dev_err(sparx5->dev,
+                               "Taxi %u with overhead factor %u\n",
+                               taxi, factor);
+                       return -EINVAL;
+               }
+       }
+
+       empty_slots = num_of_slots - sum;
+
+       for (idx = 0; idx < empty_slots; idx++)
+               data->schedule[idx] = SPX5_DSM_CAL_MAX_DEVS_PER_TAXI;
+
+       for (idx = 1; idx < num_of_slots; idx++) {
+               u32 indices_len = 0;
+               u32 slot, jdx, kdx, ts;
+               s32 cnt;
+               u32 num_of_old_slots, num_of_new_slots, tgt_score;
+
+               for (slot = 0; slot < ARRAY_SIZE(data->dev_slots); slot++) {
+                       if (data->dev_slots[slot] == idx) {
+                               data->indices[indices_len] = slot;
+                               indices_len++;
+                       }
+               }
+               if (indices_len == 0)
+                       continue;
+               kdx = 0;
+               for (slot = 0; slot < idx; slot++) {
+                       for (jdx = 0; jdx < indices_len; jdx++, kdx++)
+                               data->new_slots[kdx] = data->indices[jdx];
+               }
+
+               for (slot = 0; slot < SPX5_DSM_CAL_LEN; slot++) {
+                       if (data->schedule[slot] == SPX5_DSM_CAL_EMPTY)
+                               break;
+               }
+
+               num_of_old_slots = slot;
+               num_of_new_slots = kdx;
+               cnt = 0;
+               ts = 0;
+
+               if (num_of_new_slots > num_of_old_slots) {
+                       memcpy(data->short_list, data->schedule,
+                              sizeof(data->short_list));
+                       memcpy(data->long_list, data->new_slots,
+                              sizeof(data->long_list));
+                       tgt_score = 100000 * num_of_old_slots /
+                               num_of_new_slots;
+               } else {
+                       memcpy(data->short_list, data->new_slots,
+                              sizeof(data->short_list));
+                       memcpy(data->long_list, data->schedule,
+                              sizeof(data->long_list));
+                       tgt_score = 100000 * num_of_new_slots /
+                               num_of_old_slots;
+               }
+
+               while (sparx5_dsm_cal_len(data->short_list) > 0 ||
+                      sparx5_dsm_cal_len(data->long_list) > 0) {
+                       u32 act = 0;
+
+                       if (sparx5_dsm_cal_len(data->short_list) > 0) {
+                               data->temp_sched[ts] =
+                                       sparx5_dsm_cp_cal(data->short_list);
+                               ts++;
+                               cnt += 100000;
+                               act = 1;
+                       }
+                       while (sparx5_dsm_cal_len(data->long_list) > 0 &&
+                              cnt > 0) {
+                               data->temp_sched[ts] =
+                                       sparx5_dsm_cp_cal(data->long_list);
+                               ts++;
+                               cnt -= tgt_score;
+                               act = 1;
+                       }
+                       if (act == 0) {
+                               dev_err(sparx5->dev,
+                                       "Error in DSM calendar calculation\n");
+                               return -EINVAL;
+                       }
+               }
+
+               for (slot = 0; slot < SPX5_DSM_CAL_LEN; slot++) {
+                       if (data->temp_sched[slot] == SPX5_DSM_CAL_EMPTY)
+                               break;
+               }
+               for (slot = 0; slot < SPX5_DSM_CAL_LEN; slot++) {
+                       data->schedule[slot] = data->temp_sched[slot];
+                       data->temp_sched[slot] = SPX5_DSM_CAL_EMPTY;
+                       data->new_slots[slot] = SPX5_DSM_CAL_EMPTY;
+               }
+       }
+       return 0;
+}
+
+static int sparx5_dsm_calendar_check(struct sparx5 *sparx5,
+                                    struct sparx5_calendar_data *data)
+{
+       u32 num_of_slots, idx, port;
+       int cnt, max_dist;
+       u32 slot_indices[SPX5_DSM_CAL_LEN], distances[SPX5_DSM_CAL_LEN];
+       u32 cal_length = sparx5_dsm_cal_len(data->schedule);
+
+       for (port = 0; port < SPX5_DSM_CAL_MAX_DEVS_PER_TAXI; port++) {
+               num_of_slots = 0;
+               max_dist = data->avg_dist[port];
+               for (idx = 0; idx < SPX5_DSM_CAL_LEN; idx++) {
+                       slot_indices[idx] = SPX5_DSM_CAL_EMPTY;
+                       distances[idx] = SPX5_DSM_CAL_EMPTY;
+               }
+
+               for (idx = 0; idx < cal_length; idx++) {
+                       if (data->schedule[idx] == port) {
+                               slot_indices[num_of_slots] = idx;
+                               num_of_slots++;
+                       }
+               }
+
+               slot_indices[num_of_slots] = slot_indices[0] + cal_length;
+
+               for (idx = 0; idx < num_of_slots; idx++) {
+                       distances[idx] = (slot_indices[idx + 1] -
+                                         slot_indices[idx]) * 10;
+               }
+
+               for (idx = 0; idx < num_of_slots; idx++) {
+                       u32 jdx, kdx;
+
+                       cnt = distances[idx] - max_dist;
+                       if (cnt < 0)
+                               cnt = -cnt;
+                       kdx = 0;
+                       for (jdx = (idx + 1) % num_of_slots;
+                            jdx != idx;
+                            jdx = (jdx + 1) % num_of_slots, kdx++) {
+                               cnt += distances[jdx] - max_dist;
+                               if (cnt < 0)
+                                       cnt = -cnt;
+                               if (cnt > max_dist)
+                                       goto check_err;
+                       }
+               }
+       }
+       return 0;
+check_err:
+       dev_err(sparx5->dev,
+               "Port %u: distance %u above limit %d\n",
+               port, cnt, max_dist);
+       return -EINVAL;
+}
+
+static int sparx5_dsm_calendar_update(struct sparx5 *sparx5, u32 taxi,
+                                     struct sparx5_calendar_data *data)
+{
+       u32 idx;
+       u32 cal_len = sparx5_dsm_cal_len(data->schedule), len;
+
+       spx5_wr(DSM_TAXI_CAL_CFG_CAL_PGM_ENA_SET(1),
+               sparx5,
+               DSM_TAXI_CAL_CFG(taxi));
+       for (idx = 0; idx < cal_len; idx++) {
+               spx5_rmw(DSM_TAXI_CAL_CFG_CAL_IDX_SET(idx),
+                        DSM_TAXI_CAL_CFG_CAL_IDX,
+                        sparx5,
+                        DSM_TAXI_CAL_CFG(taxi));
+               spx5_rmw(DSM_TAXI_CAL_CFG_CAL_PGM_VAL_SET(data->schedule[idx]),
+                        DSM_TAXI_CAL_CFG_CAL_PGM_VAL,
+                        sparx5,
+                        DSM_TAXI_CAL_CFG(taxi));
+       }
+       spx5_wr(DSM_TAXI_CAL_CFG_CAL_PGM_ENA_SET(0),
+               sparx5,
+               DSM_TAXI_CAL_CFG(taxi));
+       len = DSM_TAXI_CAL_CFG_CAL_CUR_LEN_GET(spx5_rd(sparx5,
+                                                      DSM_TAXI_CAL_CFG(taxi)));
+       if (len != cal_len - 1)
+               goto update_err;
+       return 0;
+update_err:
+       dev_err(sparx5->dev, "Incorrect calendar length: %u\n", len);
+       return -EINVAL;
+}
+
+/* Configure the DSM calendar based on port configuration */
+int sparx5_config_dsm_calendar(struct sparx5 *sparx5)
+{
+       int taxi;
+       struct sparx5_calendar_data *data;
+       int err = 0;
+
+       data = kzalloc(sizeof(*data), GFP_KERNEL);
+       if (!data)
+               return -ENOMEM;
+
+       for (taxi = 0; taxi < SPX5_DSM_CAL_TAXIS; ++taxi) {
+               err = sparx5_dsm_calendar_calc(sparx5, taxi, data);
+               if (err) {
+                       dev_err(sparx5->dev, "DSM calendar calculation failed\n");
+                       goto cal_out;
+               }
+               err = sparx5_dsm_calendar_check(sparx5, data);
+               if (err) {
+                       dev_err(sparx5->dev, "DSM calendar check failed\n");
+                       goto cal_out;
+               }
+               err = sparx5_dsm_calendar_update(sparx5, taxi, data);
+               if (err) {
+                       dev_err(sparx5->dev, "DSM calendar update failed\n");
+                       goto cal_out;
+               }
+       }
+cal_out:
+       kfree(data);
+       return err;
+}
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c b/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c
new file mode 100644 (file)
index 0000000..59783fc
--- /dev/null
@@ -0,0 +1,1227 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip Sparx5 Switch driver
+ *
+ * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#include <linux/ethtool.h>
+
+#include "sparx5_main_regs.h"
+#include "sparx5_main.h"
+#include "sparx5_port.h"
+
+/* Index of ANA_AC port counters */
+#define SPX5_PORT_POLICER_DROPS 0
+
+/* Add a potentially wrapping 32-bit value to a 64-bit counter */
+static void sparx5_update_counter(u64 *cnt, u32 val)
+{
+       if (val < (*cnt & U32_MAX))
+               *cnt += (u64)1 << 32; /* value has wrapped */
+       *cnt = (*cnt & ~(u64)U32_MAX) + val;
+}
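A worked example of the wrap handling, assuming the hardware exposes a free-running 32-bit counter:

    /* *cnt = 0x1_0000_0010 and the new 32-bit reading is val = 0x5:
     *   0x5 < 0x10 (the low 32 bits of *cnt), so a wrap is inferred and
     *   1 << 32 is added; the stored counter becomes 0x2_0000_0005.
     * Readings that merely advance (val >= low word) just replace the
     * low 32 bits.
     */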
+
+enum sparx5_stats_entry {
+       spx5_stats_rx_symbol_err_cnt = 0,
+       spx5_stats_pmac_rx_symbol_err_cnt = 1,
+       spx5_stats_tx_uc_cnt = 2,
+       spx5_stats_pmac_tx_uc_cnt = 3,
+       spx5_stats_tx_mc_cnt = 4,
+       spx5_stats_tx_bc_cnt = 5,
+       spx5_stats_tx_backoff1_cnt = 6,
+       spx5_stats_tx_multi_coll_cnt = 7,
+       spx5_stats_rx_uc_cnt = 8,
+       spx5_stats_pmac_rx_uc_cnt = 9,
+       spx5_stats_rx_mc_cnt = 10,
+       spx5_stats_rx_bc_cnt = 11,
+       spx5_stats_rx_crc_err_cnt = 12,
+       spx5_stats_pmac_rx_crc_err_cnt = 13,
+       spx5_stats_rx_alignment_lost_cnt = 14,
+       spx5_stats_pmac_rx_alignment_lost_cnt = 15,
+       spx5_stats_tx_ok_bytes_cnt = 16,
+       spx5_stats_pmac_tx_ok_bytes_cnt = 17,
+       spx5_stats_tx_defer_cnt = 18,
+       spx5_stats_tx_late_coll_cnt = 19,
+       spx5_stats_tx_xcoll_cnt = 20,
+       spx5_stats_tx_csense_cnt = 21,
+       spx5_stats_rx_ok_bytes_cnt = 22,
+       spx5_stats_pmac_rx_ok_bytes_cnt = 23,
+       spx5_stats_pmac_tx_mc_cnt = 24,
+       spx5_stats_pmac_tx_bc_cnt = 25,
+       spx5_stats_tx_xdefer_cnt = 26,
+       spx5_stats_pmac_rx_mc_cnt = 27,
+       spx5_stats_pmac_rx_bc_cnt = 28,
+       spx5_stats_rx_in_range_len_err_cnt = 29,
+       spx5_stats_pmac_rx_in_range_len_err_cnt = 30,
+       spx5_stats_rx_out_of_range_len_err_cnt = 31,
+       spx5_stats_pmac_rx_out_of_range_len_err_cnt = 32,
+       spx5_stats_rx_oversize_cnt = 33,
+       spx5_stats_pmac_rx_oversize_cnt = 34,
+       spx5_stats_tx_pause_cnt = 35,
+       spx5_stats_pmac_tx_pause_cnt = 36,
+       spx5_stats_rx_pause_cnt = 37,
+       spx5_stats_pmac_rx_pause_cnt = 38,
+       spx5_stats_rx_unsup_opcode_cnt = 39,
+       spx5_stats_pmac_rx_unsup_opcode_cnt = 40,
+       spx5_stats_rx_undersize_cnt = 41,
+       spx5_stats_pmac_rx_undersize_cnt = 42,
+       spx5_stats_rx_fragments_cnt = 43,
+       spx5_stats_pmac_rx_fragments_cnt = 44,
+       spx5_stats_rx_jabbers_cnt = 45,
+       spx5_stats_pmac_rx_jabbers_cnt = 46,
+       spx5_stats_rx_size64_cnt = 47,
+       spx5_stats_pmac_rx_size64_cnt = 48,
+       spx5_stats_rx_size65to127_cnt = 49,
+       spx5_stats_pmac_rx_size65to127_cnt = 50,
+       spx5_stats_rx_size128to255_cnt = 51,
+       spx5_stats_pmac_rx_size128to255_cnt = 52,
+       spx5_stats_rx_size256to511_cnt = 53,
+       spx5_stats_pmac_rx_size256to511_cnt = 54,
+       spx5_stats_rx_size512to1023_cnt = 55,
+       spx5_stats_pmac_rx_size512to1023_cnt = 56,
+       spx5_stats_rx_size1024to1518_cnt = 57,
+       spx5_stats_pmac_rx_size1024to1518_cnt = 58,
+       spx5_stats_rx_size1519tomax_cnt = 59,
+       spx5_stats_pmac_rx_size1519tomax_cnt = 60,
+       spx5_stats_tx_size64_cnt = 61,
+       spx5_stats_pmac_tx_size64_cnt = 62,
+       spx5_stats_tx_size65to127_cnt = 63,
+       spx5_stats_pmac_tx_size65to127_cnt = 64,
+       spx5_stats_tx_size128to255_cnt = 65,
+       spx5_stats_pmac_tx_size128to255_cnt = 66,
+       spx5_stats_tx_size256to511_cnt = 67,
+       spx5_stats_pmac_tx_size256to511_cnt = 68,
+       spx5_stats_tx_size512to1023_cnt = 69,
+       spx5_stats_pmac_tx_size512to1023_cnt = 70,
+       spx5_stats_tx_size1024to1518_cnt = 71,
+       spx5_stats_pmac_tx_size1024to1518_cnt = 72,
+       spx5_stats_tx_size1519tomax_cnt = 73,
+       spx5_stats_pmac_tx_size1519tomax_cnt = 74,
+       spx5_stats_mm_rx_assembly_err_cnt = 75,
+       spx5_stats_mm_rx_assembly_ok_cnt = 76,
+       spx5_stats_mm_rx_merge_frag_cnt = 77,
+       spx5_stats_mm_rx_smd_err_cnt = 78,
+       spx5_stats_mm_tx_pfragment_cnt = 79,
+       spx5_stats_rx_bad_bytes_cnt = 80,
+       spx5_stats_pmac_rx_bad_bytes_cnt = 81,
+       spx5_stats_rx_in_bytes_cnt = 82,
+       spx5_stats_rx_ipg_shrink_cnt = 83,
+       spx5_stats_rx_sync_lost_err_cnt = 84,
+       spx5_stats_rx_tagged_frms_cnt = 85,
+       spx5_stats_rx_untagged_frms_cnt = 86,
+       spx5_stats_tx_out_bytes_cnt = 87,
+       spx5_stats_tx_tagged_frms_cnt = 88,
+       spx5_stats_tx_untagged_frms_cnt = 89,
+       spx5_stats_rx_hih_cksm_err_cnt = 90,
+       spx5_stats_pmac_rx_hih_cksm_err_cnt = 91,
+       spx5_stats_rx_xgmii_prot_err_cnt = 92,
+       spx5_stats_pmac_rx_xgmii_prot_err_cnt = 93,
+       spx5_stats_ana_ac_port_stat_lsb_cnt = 94,
+       spx5_stats_green_p0_rx_fwd = 95,
+       spx5_stats_green_p0_rx_port_drop = 111,
+       spx5_stats_green_p0_tx_port = 127,
+       spx5_stats_rx_local_drop = 143,
+       spx5_stats_tx_local_drop = 144,
+       spx5_stats_count = 145,
+};
+
+static const char *const sparx5_stats_layout[] = {
+       "mm_rx_assembly_err_cnt",
+       "mm_rx_assembly_ok_cnt",
+       "mm_rx_merge_frag_cnt",
+       "mm_rx_smd_err_cnt",
+       "mm_tx_pfragment_cnt",
+       "rx_bad_bytes_cnt",
+       "pmac_rx_bad_bytes_cnt",
+       "rx_in_bytes_cnt",
+       "rx_ipg_shrink_cnt",
+       "rx_sync_lost_err_cnt",
+       "rx_tagged_frms_cnt",
+       "rx_untagged_frms_cnt",
+       "tx_out_bytes_cnt",
+       "tx_tagged_frms_cnt",
+       "tx_untagged_frms_cnt",
+       "rx_hih_cksm_err_cnt",
+       "pmac_rx_hih_cksm_err_cnt",
+       "rx_xgmii_prot_err_cnt",
+       "pmac_rx_xgmii_prot_err_cnt",
+       "rx_port_policer_drop",
+       "rx_fwd_green_p0",
+       "rx_fwd_green_p1",
+       "rx_fwd_green_p2",
+       "rx_fwd_green_p3",
+       "rx_fwd_green_p4",
+       "rx_fwd_green_p5",
+       "rx_fwd_green_p6",
+       "rx_fwd_green_p7",
+       "rx_fwd_yellow_p0",
+       "rx_fwd_yellow_p1",
+       "rx_fwd_yellow_p2",
+       "rx_fwd_yellow_p3",
+       "rx_fwd_yellow_p4",
+       "rx_fwd_yellow_p5",
+       "rx_fwd_yellow_p6",
+       "rx_fwd_yellow_p7",
+       "rx_port_drop_green_p0",
+       "rx_port_drop_green_p1",
+       "rx_port_drop_green_p2",
+       "rx_port_drop_green_p3",
+       "rx_port_drop_green_p4",
+       "rx_port_drop_green_p5",
+       "rx_port_drop_green_p6",
+       "rx_port_drop_green_p7",
+       "rx_port_drop_yellow_p0",
+       "rx_port_drop_yellow_p1",
+       "rx_port_drop_yellow_p2",
+       "rx_port_drop_yellow_p3",
+       "rx_port_drop_yellow_p4",
+       "rx_port_drop_yellow_p5",
+       "rx_port_drop_yellow_p6",
+       "rx_port_drop_yellow_p7",
+       "tx_port_green_p0",
+       "tx_port_green_p1",
+       "tx_port_green_p2",
+       "tx_port_green_p3",
+       "tx_port_green_p4",
+       "tx_port_green_p5",
+       "tx_port_green_p6",
+       "tx_port_green_p7",
+       "tx_port_yellow_p0",
+       "tx_port_yellow_p1",
+       "tx_port_yellow_p2",
+       "tx_port_yellow_p3",
+       "tx_port_yellow_p4",
+       "tx_port_yellow_p5",
+       "tx_port_yellow_p6",
+       "tx_port_yellow_p7",
+       "rx_local_drop",
+       "tx_local_drop",
+};
+
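+/* Per-queue counters are read through the shared XQS counter view:
+ * writing a port number to XQS_STAT_CFG selects which port the XQS_CNT
+ * addresses refer to, so accesses are serialized with queue_stats_lock.
+ * Addresses 0-15 hold the green/yellow per-priority forwarded counts,
+ * 16-31 the per-priority port drops, 256-271 the egress counts, and
+ * 32/272 the local rx/tx drop counters.
+ */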
+static void sparx5_get_queue_sys_stats(struct sparx5 *sparx5, int portno)
+{
+       u64 *portstats;
+       u64 *stats;
+       u32 addr;
+       int idx;
+
+       portstats = &sparx5->stats[portno * sparx5->num_stats];
+       mutex_lock(&sparx5->queue_stats_lock);
+       spx5_wr(XQS_STAT_CFG_STAT_VIEW_SET(portno), sparx5, XQS_STAT_CFG);
+       addr = 0;
+       stats = &portstats[spx5_stats_green_p0_rx_fwd];
+       for (idx = 0; idx < 2 * SPX5_PRIOS; ++idx, ++addr, ++stats)
+               sparx5_update_counter(stats, spx5_rd(sparx5, XQS_CNT(addr)));
+       addr = 16;
+       stats = &portstats[spx5_stats_green_p0_rx_port_drop];
+       for (idx = 0; idx < 2 * SPX5_PRIOS; ++idx, ++addr, ++stats)
+               sparx5_update_counter(stats, spx5_rd(sparx5, XQS_CNT(addr)));
+       addr = 256;
+       stats = &portstats[spx5_stats_green_p0_tx_port];
+       for (idx = 0; idx < 2 * SPX5_PRIOS; ++idx, ++addr, ++stats)
+               sparx5_update_counter(stats, spx5_rd(sparx5, XQS_CNT(addr)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_local_drop],
+                             spx5_rd(sparx5, XQS_CNT(32)));
+       sparx5_update_counter(&portstats[spx5_stats_tx_local_drop],
+                             spx5_rd(sparx5, XQS_CNT(272)));
+       mutex_unlock(&sparx5->queue_stats_lock);
+}
+
+static void sparx5_get_ana_ac_stats_stats(struct sparx5 *sparx5, int portno)
+{
+       u64 *portstats = &sparx5->stats[portno * sparx5->num_stats];
+
+       sparx5_update_counter(&portstats[spx5_stats_ana_ac_port_stat_lsb_cnt],
+                             spx5_rd(sparx5, ANA_AC_PORT_STAT_LSB_CNT(portno,
+                                                                      SPX5_PORT_POLICER_DROPS)));
+}
+
+static void sparx5_get_dev_phy_stats(u64 *portstats, void __iomem *inst,
+                                    u32 tinst)
+{
+       sparx5_update_counter(&portstats[spx5_stats_rx_symbol_err_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_RX_SYMBOL_ERR_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_rx_symbol_err_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_PMAC_RX_SYMBOL_ERR_CNT(tinst)));
+}
+
+static void sparx5_get_dev_mac_stats(u64 *portstats, void __iomem *inst,
+                                    u32 tinst)
+{
+       sparx5_update_counter(&portstats[spx5_stats_tx_uc_cnt],
+                             spx5_inst_rd(inst, DEV5G_TX_UC_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_tx_uc_cnt],
+                             spx5_inst_rd(inst, DEV5G_PMAC_TX_UC_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_tx_mc_cnt],
+                             spx5_inst_rd(inst, DEV5G_TX_MC_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_tx_bc_cnt],
+                             spx5_inst_rd(inst, DEV5G_TX_BC_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_uc_cnt],
+                             spx5_inst_rd(inst, DEV5G_RX_UC_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_rx_uc_cnt],
+                             spx5_inst_rd(inst, DEV5G_PMAC_RX_UC_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_mc_cnt],
+                             spx5_inst_rd(inst, DEV5G_RX_MC_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_bc_cnt],
+                             spx5_inst_rd(inst, DEV5G_RX_BC_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_crc_err_cnt],
+                             spx5_inst_rd(inst, DEV5G_RX_CRC_ERR_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_rx_crc_err_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_PMAC_RX_CRC_ERR_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_alignment_lost_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_RX_ALIGNMENT_LOST_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_rx_alignment_lost_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_PMAC_RX_ALIGNMENT_LOST_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_tx_ok_bytes_cnt],
+                             spx5_inst_rd(inst, DEV5G_TX_OK_BYTES_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_tx_ok_bytes_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_PMAC_TX_OK_BYTES_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_ok_bytes_cnt],
+                             spx5_inst_rd(inst, DEV5G_RX_OK_BYTES_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_rx_ok_bytes_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_PMAC_RX_OK_BYTES_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_tx_mc_cnt],
+                             spx5_inst_rd(inst, DEV5G_PMAC_TX_MC_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_tx_bc_cnt],
+                             spx5_inst_rd(inst, DEV5G_PMAC_TX_BC_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_rx_mc_cnt],
+                             spx5_inst_rd(inst, DEV5G_PMAC_RX_MC_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_rx_bc_cnt],
+                             spx5_inst_rd(inst, DEV5G_PMAC_RX_BC_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_in_range_len_err_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_RX_IN_RANGE_LEN_ERR_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_rx_in_range_len_err_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_PMAC_RX_IN_RANGE_LEN_ERR_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_out_of_range_len_err_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_RX_OUT_OF_RANGE_LEN_ERR_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_rx_out_of_range_len_err_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_PMAC_RX_OUT_OF_RANGE_LEN_ERR_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_oversize_cnt],
+                             spx5_inst_rd(inst, DEV5G_RX_OVERSIZE_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_rx_oversize_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_PMAC_RX_OVERSIZE_CNT(tinst)));
+}
+
+static void sparx5_get_dev_mac_ctrl_stats(u64 *portstats, void __iomem *inst,
+                                         u32 tinst)
+{
+       sparx5_update_counter(&portstats[spx5_stats_tx_pause_cnt],
+                             spx5_inst_rd(inst, DEV5G_TX_PAUSE_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_tx_pause_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_PMAC_TX_PAUSE_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_pause_cnt],
+                             spx5_inst_rd(inst, DEV5G_RX_PAUSE_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_rx_pause_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_PMAC_RX_PAUSE_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_unsup_opcode_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_RX_UNSUP_OPCODE_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_rx_unsup_opcode_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_PMAC_RX_UNSUP_OPCODE_CNT(tinst)));
+}
+
+static void sparx5_get_dev_rmon_stats(u64 *portstats, void __iomem *inst,
+                                     u32 tinst)
+{
+       sparx5_update_counter(&portstats[spx5_stats_rx_undersize_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_RX_UNDERSIZE_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_rx_undersize_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_PMAC_RX_UNDERSIZE_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_oversize_cnt],
+                             spx5_inst_rd(inst, DEV5G_RX_OVERSIZE_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_rx_oversize_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_PMAC_RX_OVERSIZE_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_fragments_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_RX_FRAGMENTS_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_rx_fragments_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_PMAC_RX_FRAGMENTS_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_jabbers_cnt],
+                             spx5_inst_rd(inst, DEV5G_RX_JABBERS_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_rx_jabbers_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_PMAC_RX_JABBERS_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_size64_cnt],
+                             spx5_inst_rd(inst, DEV5G_RX_SIZE64_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_rx_size64_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_PMAC_RX_SIZE64_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_size65to127_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_RX_SIZE65TO127_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_rx_size65to127_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_PMAC_RX_SIZE65TO127_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_size128to255_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_RX_SIZE128TO255_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_rx_size128to255_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_PMAC_RX_SIZE128TO255_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_size256to511_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_RX_SIZE256TO511_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_rx_size256to511_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_PMAC_RX_SIZE256TO511_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_size512to1023_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_RX_SIZE512TO1023_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_rx_size512to1023_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_PMAC_RX_SIZE512TO1023_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_size1024to1518_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_RX_SIZE1024TO1518_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_rx_size1024to1518_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_PMAC_RX_SIZE1024TO1518_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_size1519tomax_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_RX_SIZE1519TOMAX_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_rx_size1519tomax_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_PMAC_RX_SIZE1519TOMAX_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_tx_size64_cnt],
+                             spx5_inst_rd(inst, DEV5G_TX_SIZE64_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_tx_size64_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_PMAC_TX_SIZE64_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_tx_size65to127_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_TX_SIZE65TO127_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_tx_size65to127_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_PMAC_TX_SIZE65TO127_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_tx_size128to255_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_TX_SIZE128TO255_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_tx_size128to255_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_PMAC_TX_SIZE128TO255_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_tx_size256to511_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_TX_SIZE256TO511_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_tx_size256to511_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_PMAC_TX_SIZE256TO511_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_tx_size512to1023_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_TX_SIZE512TO1023_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_tx_size512to1023_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_PMAC_TX_SIZE512TO1023_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_tx_size1024to1518_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_TX_SIZE1024TO1518_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_tx_size1024to1518_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_PMAC_TX_SIZE1024TO1518_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_tx_size1519tomax_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_TX_SIZE1519TOMAX_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_tx_size1519tomax_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_PMAC_TX_SIZE1519TOMAX_CNT(tinst)));
+}
+
+static void sparx5_get_dev_misc_stats(u64 *portstats, void __iomem *inst,
+                                     u32 tinst)
+{
+       sparx5_update_counter(&portstats[spx5_stats_mm_rx_assembly_err_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_MM_RX_ASSEMBLY_ERR_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_mm_rx_assembly_ok_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_MM_RX_ASSEMBLY_OK_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_mm_rx_merge_frag_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_MM_RX_MERGE_FRAG_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_mm_rx_smd_err_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_MM_RX_SMD_ERR_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_mm_tx_pfragment_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_MM_TX_PFRAGMENT_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_bad_bytes_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_RX_BAD_BYTES_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_rx_bad_bytes_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_PMAC_RX_BAD_BYTES_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_in_bytes_cnt],
+                             spx5_inst_rd(inst, DEV5G_RX_IN_BYTES_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_ipg_shrink_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_RX_IPG_SHRINK_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_tagged_frms_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_RX_TAGGED_FRMS_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_untagged_frms_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_RX_UNTAGGED_FRMS_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_tx_out_bytes_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_TX_OUT_BYTES_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_tx_tagged_frms_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_TX_TAGGED_FRMS_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_tx_untagged_frms_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_TX_UNTAGGED_FRMS_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_hih_cksm_err_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_RX_HIH_CKSM_ERR_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_rx_hih_cksm_err_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_PMAC_RX_HIH_CKSM_ERR_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_xgmii_prot_err_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_RX_XGMII_PROT_ERR_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_rx_xgmii_prot_err_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_PMAC_RX_XGMII_PROT_ERR_CNT(tinst)));
+}
+
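+/* Refresh all counter groups of a port handled by a high-speed device;
+ * sparx5_to_high_dev() selects the device target and the DEV5G_*
+ * register macros are used for whichever device instance is mapped.
+ */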
+static void sparx5_get_device_stats(struct sparx5 *sparx5, int portno)
+{
+       u64 *portstats = &sparx5->stats[portno * sparx5->num_stats];
+       u32 tinst = sparx5_port_dev_index(portno);
+       u32 dev = sparx5_to_high_dev(portno);
+       void __iomem *inst;
+
+       inst = spx5_inst_get(sparx5, dev, tinst);
+       sparx5_get_dev_phy_stats(portstats, inst, tinst);
+       sparx5_get_dev_mac_stats(portstats, inst, tinst);
+       sparx5_get_dev_mac_ctrl_stats(portstats, inst, tinst);
+       sparx5_get_dev_rmon_stats(portstats, inst, tinst);
+       sparx5_get_dev_misc_stats(portstats, inst, tinst);
+}
+
+static void sparx5_get_asm_phy_stats(u64 *portstats, void __iomem *inst,
+                                    int portno)
+{
+       sparx5_update_counter(&portstats[spx5_stats_rx_symbol_err_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_RX_SYMBOL_ERR_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_rx_symbol_err_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_PMAC_RX_SYMBOL_ERR_CNT(portno)));
+}
+
+static void sparx5_get_asm_mac_stats(u64 *portstats, void __iomem *inst,
+                                    int portno)
+{
+       sparx5_update_counter(&portstats[spx5_stats_tx_uc_cnt],
+                             spx5_inst_rd(inst, ASM_TX_UC_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_tx_uc_cnt],
+                             spx5_inst_rd(inst, ASM_PMAC_TX_UC_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_tx_mc_cnt],
+                             spx5_inst_rd(inst, ASM_TX_MC_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_tx_bc_cnt],
+                             spx5_inst_rd(inst, ASM_TX_BC_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_tx_backoff1_cnt],
+                             spx5_inst_rd(inst, ASM_TX_BACKOFF1_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_tx_multi_coll_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_TX_MULTI_COLL_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_uc_cnt],
+                             spx5_inst_rd(inst, ASM_RX_UC_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_rx_uc_cnt],
+                             spx5_inst_rd(inst, ASM_PMAC_RX_UC_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_mc_cnt],
+                             spx5_inst_rd(inst, ASM_RX_MC_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_bc_cnt],
+                             spx5_inst_rd(inst, ASM_RX_BC_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_crc_err_cnt],
+                             spx5_inst_rd(inst, ASM_RX_CRC_ERR_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_rx_crc_err_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_PMAC_RX_CRC_ERR_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_alignment_lost_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_RX_ALIGNMENT_LOST_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_rx_alignment_lost_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_PMAC_RX_ALIGNMENT_LOST_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_tx_ok_bytes_cnt],
+                             spx5_inst_rd(inst, ASM_TX_OK_BYTES_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_tx_ok_bytes_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_PMAC_TX_OK_BYTES_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_tx_defer_cnt],
+                             spx5_inst_rd(inst, ASM_TX_DEFER_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_tx_late_coll_cnt],
+                             spx5_inst_rd(inst, ASM_TX_LATE_COLL_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_tx_xcoll_cnt],
+                             spx5_inst_rd(inst, ASM_TX_XCOLL_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_tx_csense_cnt],
+                             spx5_inst_rd(inst, ASM_TX_CSENSE_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_ok_bytes_cnt],
+                             spx5_inst_rd(inst, ASM_RX_OK_BYTES_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_rx_ok_bytes_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_PMAC_RX_OK_BYTES_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_tx_mc_cnt],
+                             spx5_inst_rd(inst, ASM_PMAC_TX_MC_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_tx_bc_cnt],
+                             spx5_inst_rd(inst, ASM_PMAC_TX_BC_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_tx_xdefer_cnt],
+                             spx5_inst_rd(inst, ASM_TX_XDEFER_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_rx_mc_cnt],
+                             spx5_inst_rd(inst, ASM_PMAC_RX_MC_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_rx_bc_cnt],
+                             spx5_inst_rd(inst, ASM_PMAC_RX_BC_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_in_range_len_err_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_RX_IN_RANGE_LEN_ERR_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_rx_in_range_len_err_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_PMAC_RX_IN_RANGE_LEN_ERR_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_out_of_range_len_err_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_RX_OUT_OF_RANGE_LEN_ERR_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_rx_out_of_range_len_err_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_PMAC_RX_OUT_OF_RANGE_LEN_ERR_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_oversize_cnt],
+                             spx5_inst_rd(inst, ASM_RX_OVERSIZE_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_rx_oversize_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_PMAC_RX_OVERSIZE_CNT(portno)));
+}
+
+static void sparx5_get_asm_mac_ctrl_stats(u64 *portstats, void __iomem *inst,
+                                         int portno)
+{
+       sparx5_update_counter(&portstats[spx5_stats_tx_pause_cnt],
+                             spx5_inst_rd(inst, ASM_TX_PAUSE_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_tx_pause_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_PMAC_TX_PAUSE_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_pause_cnt],
+                             spx5_inst_rd(inst, ASM_RX_PAUSE_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_rx_pause_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_PMAC_RX_PAUSE_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_unsup_opcode_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_RX_UNSUP_OPCODE_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_rx_unsup_opcode_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_PMAC_RX_UNSUP_OPCODE_CNT(portno)));
+}
+
+static void sparx5_get_asm_rmon_stats(u64 *portstats, void __iomem *inst,
+                                     int portno)
+{
+       sparx5_update_counter(&portstats[spx5_stats_rx_undersize_cnt],
+                             spx5_inst_rd(inst, ASM_RX_UNDERSIZE_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_rx_undersize_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_PMAC_RX_UNDERSIZE_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_oversize_cnt],
+                             spx5_inst_rd(inst, ASM_RX_OVERSIZE_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_rx_oversize_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_PMAC_RX_OVERSIZE_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_fragments_cnt],
+                             spx5_inst_rd(inst, ASM_RX_FRAGMENTS_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_rx_fragments_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_PMAC_RX_FRAGMENTS_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_jabbers_cnt],
+                             spx5_inst_rd(inst, ASM_RX_JABBERS_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_rx_jabbers_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_PMAC_RX_JABBERS_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_size64_cnt],
+                             spx5_inst_rd(inst, ASM_RX_SIZE64_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_rx_size64_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_PMAC_RX_SIZE64_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_size65to127_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_RX_SIZE65TO127_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_rx_size65to127_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_PMAC_RX_SIZE65TO127_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_size128to255_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_RX_SIZE128TO255_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_rx_size128to255_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_PMAC_RX_SIZE128TO255_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_size256to511_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_RX_SIZE256TO511_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_rx_size256to511_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_PMAC_RX_SIZE256TO511_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_size512to1023_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_RX_SIZE512TO1023_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_rx_size512to1023_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_PMAC_RX_SIZE512TO1023_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_size1024to1518_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_RX_SIZE1024TO1518_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_rx_size1024to1518_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_PMAC_RX_SIZE1024TO1518_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_size1519tomax_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_RX_SIZE1519TOMAX_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_rx_size1519tomax_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_PMAC_RX_SIZE1519TOMAX_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_tx_size64_cnt],
+                             spx5_inst_rd(inst, ASM_TX_SIZE64_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_tx_size64_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_PMAC_TX_SIZE64_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_tx_size65to127_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_TX_SIZE65TO127_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_tx_size65to127_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_PMAC_TX_SIZE65TO127_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_tx_size128to255_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_TX_SIZE128TO255_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_tx_size128to255_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_PMAC_TX_SIZE128TO255_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_tx_size256to511_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_TX_SIZE256TO511_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_tx_size256to511_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_PMAC_TX_SIZE256TO511_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_tx_size512to1023_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_TX_SIZE512TO1023_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_tx_size512to1023_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_PMAC_TX_SIZE512TO1023_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_tx_size1024to1518_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_TX_SIZE1024TO1518_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_tx_size1024to1518_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_PMAC_TX_SIZE1024TO1518_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_tx_size1519tomax_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_TX_SIZE1519TOMAX_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_tx_size1519tomax_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_PMAC_TX_SIZE1519TOMAX_CNT(portno)));
+}
+
+static void sparx5_get_asm_misc_stats(u64 *portstats, void __iomem *inst,
+                                     int portno)
+{
+       sparx5_update_counter(&portstats[spx5_stats_mm_rx_assembly_err_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_MM_RX_ASSEMBLY_ERR_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_mm_rx_assembly_ok_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_MM_RX_ASSEMBLY_OK_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_mm_rx_merge_frag_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_MM_RX_MERGE_FRAG_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_mm_rx_smd_err_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_MM_RX_SMD_ERR_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_mm_tx_pfragment_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_MM_TX_PFRAGMENT_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_bad_bytes_cnt],
+                             spx5_inst_rd(inst, ASM_RX_BAD_BYTES_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_rx_bad_bytes_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_PMAC_RX_BAD_BYTES_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_in_bytes_cnt],
+                             spx5_inst_rd(inst, ASM_RX_IN_BYTES_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_ipg_shrink_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_RX_IPG_SHRINK_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_sync_lost_err_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_RX_SYNC_LOST_ERR_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_tagged_frms_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_RX_TAGGED_FRMS_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_untagged_frms_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_RX_UNTAGGED_FRMS_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_tx_out_bytes_cnt],
+                             spx5_inst_rd(inst, ASM_TX_OUT_BYTES_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_tx_tagged_frms_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_TX_TAGGED_FRMS_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_tx_untagged_frms_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_TX_UNTAGGED_FRMS_CNT(portno)));
+}
+
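+/* Refresh all counter groups of a non-BaseR port, whose counters are
+ * kept in the assembler (ASM) block.
+ */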
+static void sparx5_get_asm_stats(struct sparx5 *sparx5, int portno)
+{
+       u64 *portstats = &sparx5->stats[portno * sparx5->num_stats];
+       void __iomem *inst = spx5_inst_get(sparx5, TARGET_ASM, 0);
+
+       sparx5_get_asm_phy_stats(portstats, inst, portno);
+       sparx5_get_asm_mac_stats(portstats, inst, portno);
+       sparx5_get_asm_mac_ctrl_stats(portstats, inst, portno);
+       sparx5_get_asm_rmon_stats(portstats, inst, portno);
+       sparx5_get_asm_misc_stats(portstats, inst, portno);
+}
+
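+/* Packet size histogram buckets reported through ethtool; the
+ * zero-terminated ranges must line up with the hist[]/hist_tx[]
+ * indices filled in by sparx5_get_eth_rmon_stats() below.
+ */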
+static const struct ethtool_rmon_hist_range sparx5_rmon_ranges[] = {
+       {    0,    64 },
+       {   65,   127 },
+       {  128,   255 },
+       {  256,   511 },
+       {  512,  1023 },
+       { 1024,  1518 },
+       { 1519, 10239 },
+       {}
+};
+
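+/* Each of the standardized ethtool counter groups below refreshes only
+ * the counters it reports: BaseR ports read from their high-speed
+ * device instance, all other ports from the ASM block.
+ */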
+static void sparx5_get_eth_phy_stats(struct net_device *ndev,
+                                    struct ethtool_eth_phy_stats *phy_stats)
+{
+       struct sparx5_port *port = netdev_priv(ndev);
+       struct sparx5 *sparx5 = port->sparx5;
+       int portno = port->portno;
+       void __iomem *inst;
+       u64 *portstats;
+
+       portstats = &sparx5->stats[portno * sparx5->num_stats];
+       if (sparx5_is_baser(port->conf.portmode)) {
+               u32 tinst = sparx5_port_dev_index(portno);
+               u32 dev = sparx5_to_high_dev(portno);
+
+               inst = spx5_inst_get(sparx5, dev, tinst);
+               sparx5_get_dev_phy_stats(portstats, inst, tinst);
+       } else {
+               inst = spx5_inst_get(sparx5, TARGET_ASM, 0);
+               sparx5_get_asm_phy_stats(portstats, inst, portno);
+       }
+       phy_stats->SymbolErrorDuringCarrier =
+               portstats[spx5_stats_rx_symbol_err_cnt] +
+               portstats[spx5_stats_pmac_rx_symbol_err_cnt];
+}
+
+static void sparx5_get_eth_mac_stats(struct net_device *ndev,
+                                    struct ethtool_eth_mac_stats *mac_stats)
+{
+       struct sparx5_port *port = netdev_priv(ndev);
+       struct sparx5 *sparx5 = port->sparx5;
+       int portno = port->portno;
+       void __iomem *inst;
+       u64 *portstats;
+
+       portstats = &sparx5->stats[portno * sparx5->num_stats];
+       if (sparx5_is_baser(port->conf.portmode)) {
+               u32 tinst = sparx5_port_dev_index(portno);
+               u32 dev = sparx5_to_high_dev(portno);
+
+               inst = spx5_inst_get(sparx5, dev, tinst);
+               sparx5_get_dev_mac_stats(portstats, inst, tinst);
+       } else {
+               inst = spx5_inst_get(sparx5, TARGET_ASM, 0);
+               sparx5_get_asm_mac_stats(portstats, inst, portno);
+       }
+       mac_stats->FramesTransmittedOK = portstats[spx5_stats_tx_uc_cnt] +
+               portstats[spx5_stats_pmac_tx_uc_cnt] +
+               portstats[spx5_stats_tx_mc_cnt] +
+               portstats[spx5_stats_tx_bc_cnt];
+       mac_stats->SingleCollisionFrames =
+               portstats[spx5_stats_tx_backoff1_cnt];
+       mac_stats->MultipleCollisionFrames =
+               portstats[spx5_stats_tx_multi_coll_cnt];
+       mac_stats->FramesReceivedOK = portstats[spx5_stats_rx_uc_cnt] +
+               portstats[spx5_stats_pmac_rx_uc_cnt] +
+               portstats[spx5_stats_rx_mc_cnt] +
+               portstats[spx5_stats_rx_bc_cnt];
+       mac_stats->FrameCheckSequenceErrors =
+               portstats[spx5_stats_rx_crc_err_cnt] +
+               portstats[spx5_stats_pmac_rx_crc_err_cnt];
+       mac_stats->AlignmentErrors = portstats[spx5_stats_rx_alignment_lost_cnt]
+               + portstats[spx5_stats_pmac_rx_alignment_lost_cnt];
+       mac_stats->OctetsTransmittedOK = portstats[spx5_stats_tx_ok_bytes_cnt] +
+               portstats[spx5_stats_pmac_tx_ok_bytes_cnt];
+       mac_stats->FramesWithDeferredXmissions =
+               portstats[spx5_stats_tx_defer_cnt];
+       mac_stats->LateCollisions =
+               portstats[spx5_stats_tx_late_coll_cnt];
+       mac_stats->FramesAbortedDueToXSColls =
+               portstats[spx5_stats_tx_xcoll_cnt];
+       mac_stats->CarrierSenseErrors = portstats[spx5_stats_tx_csense_cnt];
+       mac_stats->OctetsReceivedOK = portstats[spx5_stats_rx_ok_bytes_cnt] +
+               portstats[spx5_stats_pmac_rx_ok_bytes_cnt];
+       mac_stats->MulticastFramesXmittedOK = portstats[spx5_stats_tx_mc_cnt] +
+               portstats[spx5_stats_pmac_tx_mc_cnt];
+       mac_stats->BroadcastFramesXmittedOK = portstats[spx5_stats_tx_bc_cnt] +
+               portstats[spx5_stats_pmac_tx_bc_cnt];
+       mac_stats->FramesWithExcessiveDeferral =
+               portstats[spx5_stats_tx_xdefer_cnt];
+       mac_stats->MulticastFramesReceivedOK = portstats[spx5_stats_rx_mc_cnt] +
+               portstats[spx5_stats_pmac_rx_mc_cnt];
+       mac_stats->BroadcastFramesReceivedOK = portstats[spx5_stats_rx_bc_cnt] +
+               portstats[spx5_stats_pmac_rx_bc_cnt];
+       mac_stats->InRangeLengthErrors =
+               portstats[spx5_stats_rx_in_range_len_err_cnt] +
+               portstats[spx5_stats_pmac_rx_in_range_len_err_cnt];
+       mac_stats->OutOfRangeLengthField =
+               portstats[spx5_stats_rx_out_of_range_len_err_cnt] +
+               portstats[spx5_stats_pmac_rx_out_of_range_len_err_cnt];
+       mac_stats->FrameTooLongErrors = portstats[spx5_stats_rx_oversize_cnt] +
+               portstats[spx5_stats_pmac_rx_oversize_cnt];
+}
+
+static void sparx5_get_eth_mac_ctrl_stats(struct net_device *ndev,
+                                         struct ethtool_eth_ctrl_stats *mac_ctrl_stats)
+{
+       struct sparx5_port *port = netdev_priv(ndev);
+       struct sparx5 *sparx5 = port->sparx5;
+       int portno = port->portno;
+       void __iomem *inst;
+       u64 *portstats;
+
+       portstats = &sparx5->stats[portno * sparx5->num_stats];
+       if (sparx5_is_baser(port->conf.portmode)) {
+               u32 tinst = sparx5_port_dev_index(portno);
+               u32 dev = sparx5_to_high_dev(portno);
+
+               inst = spx5_inst_get(sparx5, dev, tinst);
+               sparx5_get_dev_mac_ctrl_stats(portstats, inst, tinst);
+       } else {
+               inst = spx5_inst_get(sparx5, TARGET_ASM, 0);
+               sparx5_get_asm_mac_ctrl_stats(portstats, inst, portno);
+       }
+       mac_ctrl_stats->MACControlFramesTransmitted =
+               portstats[spx5_stats_tx_pause_cnt] +
+               portstats[spx5_stats_pmac_tx_pause_cnt];
+       mac_ctrl_stats->MACControlFramesReceived =
+               portstats[spx5_stats_rx_pause_cnt] +
+               portstats[spx5_stats_pmac_rx_pause_cnt];
+       mac_ctrl_stats->UnsupportedOpcodesReceived =
+               portstats[spx5_stats_rx_unsup_opcode_cnt] +
+               portstats[spx5_stats_pmac_rx_unsup_opcode_cnt];
+}
+
+static void sparx5_get_eth_rmon_stats(struct net_device *ndev,
+                                     struct ethtool_rmon_stats *rmon_stats,
+                                     const struct ethtool_rmon_hist_range **ranges)
+{
+       struct sparx5_port *port = netdev_priv(ndev);
+       struct sparx5 *sparx5 = port->sparx5;
+       int portno = port->portno;
+       void __iomem *inst;
+       u64 *portstats;
+
+       portstats = &sparx5->stats[portno * sparx5->num_stats];
+       if (sparx5_is_baser(port->conf.portmode)) {
+               u32 tinst = sparx5_port_dev_index(portno);
+               u32 dev = sparx5_to_high_dev(portno);
+
+               inst = spx5_inst_get(sparx5, dev, tinst);
+               sparx5_get_dev_rmon_stats(portstats, inst, tinst);
+       } else {
+               inst = spx5_inst_get(sparx5, TARGET_ASM, 0);
+               sparx5_get_asm_rmon_stats(portstats, inst, portno);
+       }
+       rmon_stats->undersize_pkts = portstats[spx5_stats_rx_undersize_cnt] +
+               portstats[spx5_stats_pmac_rx_undersize_cnt];
+       rmon_stats->oversize_pkts = portstats[spx5_stats_rx_oversize_cnt] +
+               portstats[spx5_stats_pmac_rx_oversize_cnt];
+       rmon_stats->fragments = portstats[spx5_stats_rx_fragments_cnt] +
+               portstats[spx5_stats_pmac_rx_fragments_cnt];
+       rmon_stats->jabbers = portstats[spx5_stats_rx_jabbers_cnt] +
+               portstats[spx5_stats_pmac_rx_jabbers_cnt];
+       rmon_stats->hist[0] = portstats[spx5_stats_rx_size64_cnt] +
+               portstats[spx5_stats_pmac_rx_size64_cnt];
+       rmon_stats->hist[1] = portstats[spx5_stats_rx_size65to127_cnt] +
+               portstats[spx5_stats_pmac_rx_size65to127_cnt];
+       rmon_stats->hist[2] = portstats[spx5_stats_rx_size128to255_cnt] +
+               portstats[spx5_stats_pmac_rx_size128to255_cnt];
+       rmon_stats->hist[3] = portstats[spx5_stats_rx_size256to511_cnt] +
+               portstats[spx5_stats_pmac_rx_size256to511_cnt];
+       rmon_stats->hist[4] = portstats[spx5_stats_rx_size512to1023_cnt] +
+               portstats[spx5_stats_pmac_rx_size512to1023_cnt];
+       rmon_stats->hist[5] = portstats[spx5_stats_rx_size1024to1518_cnt] +
+               portstats[spx5_stats_pmac_rx_size1024to1518_cnt];
+       rmon_stats->hist[6] = portstats[spx5_stats_rx_size1519tomax_cnt] +
+               portstats[spx5_stats_pmac_rx_size1519tomax_cnt];
+       rmon_stats->hist_tx[0] = portstats[spx5_stats_tx_size64_cnt] +
+               portstats[spx5_stats_pmac_tx_size64_cnt];
+       rmon_stats->hist_tx[1] = portstats[spx5_stats_tx_size65to127_cnt] +
+               portstats[spx5_stats_pmac_tx_size65to127_cnt];
+       rmon_stats->hist_tx[2] = portstats[spx5_stats_tx_size128to255_cnt] +
+               portstats[spx5_stats_pmac_tx_size128to255_cnt];
+       rmon_stats->hist_tx[3] = portstats[spx5_stats_tx_size256to511_cnt] +
+               portstats[spx5_stats_pmac_tx_size256to511_cnt];
+       rmon_stats->hist_tx[4] = portstats[spx5_stats_tx_size512to1023_cnt] +
+               portstats[spx5_stats_pmac_tx_size512to1023_cnt];
+       rmon_stats->hist_tx[5] = portstats[spx5_stats_tx_size1024to1518_cnt] +
+               portstats[spx5_stats_pmac_tx_size1024to1518_cnt];
+       rmon_stats->hist_tx[6] = portstats[spx5_stats_tx_size1519tomax_cnt] +
+               portstats[spx5_stats_pmac_tx_size1519tomax_cnt];
+       *ranges = sparx5_rmon_ranges;
+}
+
+static int sparx5_get_sset_count(struct net_device *ndev, int sset)
+{
+       struct sparx5_port *port = netdev_priv(ndev);
+       struct sparx5 *sparx5 = port->sparx5;
+
+       if (sset != ETH_SS_STATS)
+               return -EOPNOTSUPP;
+       return sparx5->num_ethtool_stats;
+}
+
+static void sparx5_get_sset_strings(struct net_device *ndev, u32 sset, u8 *data)
+{
+       struct sparx5_port *port = netdev_priv(ndev);
+       struct sparx5 *sparx5 = port->sparx5;
+       int idx;
+
+       if (sset != ETH_SS_STATS)
+               return;
+
+       for (idx = 0; idx < sparx5->num_ethtool_stats; idx++)
+               strncpy(data + idx * ETH_GSTRING_LEN,
+                       sparx5->stats_layout[idx], ETH_GSTRING_LEN);
+}
+
+static void sparx5_get_sset_data(struct net_device *ndev,
+                                struct ethtool_stats *stats, u64 *data)
+{
+       struct sparx5_port *port = netdev_priv(ndev);
+       struct sparx5 *sparx5 = port->sparx5;
+       int portno = port->portno;
+       void __iomem *inst;
+       u64 *portstats;
+       int idx;
+
+       portstats = &sparx5->stats[portno * sparx5->num_stats];
+       if (sparx5_is_baser(port->conf.portmode)) {
+               u32 tinst = sparx5_port_dev_index(portno);
+               u32 dev = sparx5_to_high_dev(portno);
+
+               inst = spx5_inst_get(sparx5, dev, tinst);
+               sparx5_get_dev_misc_stats(portstats, inst, tinst);
+       } else {
+               inst = spx5_inst_get(sparx5, TARGET_ASM, 0);
+               sparx5_get_asm_misc_stats(portstats, inst, portno);
+       }
+       sparx5_get_ana_ac_stats_stats(sparx5, portno);
+       sparx5_get_queue_sys_stats(sparx5, portno);
+       /* Copy port counters to the ethtool buffer */
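+       /* The names in sparx5_stats_layout correspond one-to-one to the
+        * counters from spx5_stats_mm_rx_assembly_err_cnt onwards.
+        */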
+       for (idx = spx5_stats_mm_rx_assembly_err_cnt;
+            idx < spx5_stats_mm_rx_assembly_err_cnt +
+            sparx5->num_ethtool_stats; idx++)
+               *data++ = portstats[idx];
+}
+
+void sparx5_get_stats64(struct net_device *ndev,
+                       struct rtnl_link_stats64 *stats)
+{
+       struct sparx5_port *port = netdev_priv(ndev);
+       struct sparx5 *sparx5 = port->sparx5;
+       u64 *portstats;
+       int idx;
+
+       if (!sparx5->stats)
+               return; /* Not initialized yet */
+
+       portstats = &sparx5->stats[port->portno * sparx5->num_stats];
+
+       stats->rx_packets = portstats[spx5_stats_rx_uc_cnt] +
+               portstats[spx5_stats_pmac_rx_uc_cnt] +
+               portstats[spx5_stats_rx_mc_cnt] +
+               portstats[spx5_stats_rx_bc_cnt];
+       stats->tx_packets = portstats[spx5_stats_tx_uc_cnt] +
+               portstats[spx5_stats_pmac_tx_uc_cnt] +
+               portstats[spx5_stats_tx_mc_cnt] +
+               portstats[spx5_stats_tx_bc_cnt];
+       stats->rx_bytes = portstats[spx5_stats_rx_ok_bytes_cnt] +
+               portstats[spx5_stats_pmac_rx_ok_bytes_cnt];
+       stats->tx_bytes = portstats[spx5_stats_tx_ok_bytes_cnt] +
+               portstats[spx5_stats_pmac_tx_ok_bytes_cnt];
+       stats->rx_errors = portstats[spx5_stats_rx_in_range_len_err_cnt] +
+               portstats[spx5_stats_pmac_rx_in_range_len_err_cnt] +
+               portstats[spx5_stats_rx_out_of_range_len_err_cnt] +
+               portstats[spx5_stats_pmac_rx_out_of_range_len_err_cnt] +
+               portstats[spx5_stats_rx_oversize_cnt] +
+               portstats[spx5_stats_pmac_rx_oversize_cnt] +
+               portstats[spx5_stats_rx_crc_err_cnt] +
+               portstats[spx5_stats_pmac_rx_crc_err_cnt] +
+               portstats[spx5_stats_rx_alignment_lost_cnt] +
+               portstats[spx5_stats_pmac_rx_alignment_lost_cnt];
+       stats->tx_errors = portstats[spx5_stats_tx_xcoll_cnt] +
+               portstats[spx5_stats_tx_csense_cnt] +
+               portstats[spx5_stats_tx_late_coll_cnt];
+       stats->multicast = portstats[spx5_stats_rx_mc_cnt] +
+               portstats[spx5_stats_pmac_rx_mc_cnt];
+       stats->collisions = portstats[spx5_stats_tx_late_coll_cnt] +
+               portstats[spx5_stats_tx_xcoll_cnt] +
+               portstats[spx5_stats_tx_backoff1_cnt];
+       stats->rx_length_errors = portstats[spx5_stats_rx_in_range_len_err_cnt] +
+               portstats[spx5_stats_pmac_rx_in_range_len_err_cnt] +
+               portstats[spx5_stats_rx_out_of_range_len_err_cnt] +
+               portstats[spx5_stats_pmac_rx_out_of_range_len_err_cnt] +
+               portstats[spx5_stats_rx_oversize_cnt] +
+               portstats[spx5_stats_pmac_rx_oversize_cnt];
+       stats->rx_crc_errors = portstats[spx5_stats_rx_crc_err_cnt] +
+               portstats[spx5_stats_pmac_rx_crc_err_cnt];
+       stats->rx_frame_errors = portstats[spx5_stats_rx_alignment_lost_cnt] +
+               portstats[spx5_stats_pmac_rx_alignment_lost_cnt];
+       stats->tx_aborted_errors = portstats[spx5_stats_tx_xcoll_cnt];
+       stats->tx_carrier_errors = portstats[spx5_stats_tx_csense_cnt];
+       stats->tx_window_errors = portstats[spx5_stats_tx_late_coll_cnt];
+       stats->rx_dropped = portstats[spx5_stats_ana_ac_port_stat_lsb_cnt];
+       for (idx = 0; idx < 2 * SPX5_PRIOS; ++idx)
+               stats->rx_dropped += portstats[spx5_stats_green_p0_rx_port_drop
+                                              + idx];
+       stats->tx_dropped = portstats[spx5_stats_tx_local_drop];
+}
+
+static void sparx5_update_port_stats(struct sparx5 *sparx5, int portno)
+{
+       if (sparx5_is_baser(sparx5->ports[portno]->conf.portmode))
+               sparx5_get_device_stats(sparx5, portno);
+       else
+               sparx5_get_asm_stats(sparx5, portno);
+       sparx5_get_ana_ac_stats_stats(sparx5, portno);
+       sparx5_get_queue_sys_stats(sparx5, portno);
+}
+
+static void sparx5_update_stats(struct sparx5 *sparx5)
+{
+       int idx;
+
+       for (idx = 0; idx < SPX5_PORTS; idx++)
+               if (sparx5->ports[idx])
+                       sparx5_update_port_stats(sparx5, idx);
+}
+
+static void sparx5_check_stats_work(struct work_struct *work)
+{
+       struct delayed_work *dwork = to_delayed_work(work);
+       struct sparx5 *sparx5 = container_of(dwork,
+                                            struct sparx5,
+                                            stats_work);
+
+       sparx5_update_stats(sparx5);
+
+       queue_delayed_work(sparx5->stats_queue, &sparx5->stats_work,
+                          SPX5_STATS_CHECK_DELAY);
+}
+
+static int sparx5_get_link_settings(struct net_device *ndev,
+                                   struct ethtool_link_ksettings *cmd)
+{
+       struct sparx5_port *port = netdev_priv(ndev);
+
+       return phylink_ethtool_ksettings_get(port->phylink, cmd);
+}
+
+static int sparx5_set_link_settings(struct net_device *ndev,
+                                   const struct ethtool_link_ksettings *cmd)
+{
+       struct sparx5_port *port = netdev_priv(ndev);
+
+       return phylink_ethtool_ksettings_set(port->phylink, cmd);
+}
+
+static void sparx5_config_stats(struct sparx5 *sparx5)
+{
+       /* Enable global events for port policer drops */
+       spx5_rmw(ANA_AC_PORT_SGE_CFG_MASK_SET(0xf0f0),
+                ANA_AC_PORT_SGE_CFG_MASK,
+                sparx5,
+                ANA_AC_PORT_SGE_CFG(SPX5_PORT_POLICER_DROPS));
+}
+
+static void sparx5_config_port_stats(struct sparx5 *sparx5, int portno)
+{
+       /* Clear Queue System counters */
+       spx5_wr(XQS_STAT_CFG_STAT_VIEW_SET(portno) |
+               XQS_STAT_CFG_STAT_CLEAR_SHOT_SET(3), sparx5,
+               XQS_STAT_CFG);
+
+       /* Use counter for port policer drop count */
+       spx5_rmw(ANA_AC_PORT_STAT_CFG_CFG_CNT_FRM_TYPE_SET(1) |
+                ANA_AC_PORT_STAT_CFG_CFG_CNT_BYTE_SET(0) |
+                ANA_AC_PORT_STAT_CFG_CFG_PRIO_MASK_SET(0xff),
+                ANA_AC_PORT_STAT_CFG_CFG_CNT_FRM_TYPE |
+                ANA_AC_PORT_STAT_CFG_CFG_CNT_BYTE |
+                ANA_AC_PORT_STAT_CFG_CFG_PRIO_MASK,
+                sparx5, ANA_AC_PORT_STAT_CFG(portno, SPX5_PORT_POLICER_DROPS));
+}
+
+const struct ethtool_ops sparx5_ethtool_ops = {
+       .get_sset_count         = sparx5_get_sset_count,
+       .get_strings            = sparx5_get_sset_strings,
+       .get_ethtool_stats      = sparx5_get_sset_data,
+       .get_link_ksettings     = sparx5_get_link_settings,
+       .set_link_ksettings     = sparx5_set_link_settings,
+       .get_link               = ethtool_op_get_link,
+       .get_eth_phy_stats      = sparx5_get_eth_phy_stats,
+       .get_eth_mac_stats      = sparx5_get_eth_mac_stats,
+       .get_eth_ctrl_stats     = sparx5_get_eth_mac_ctrl_stats,
+       .get_rmon_stats         = sparx5_get_eth_rmon_stats,
+};
+
+int sparx_stats_init(struct sparx5 *sparx5)
+{
+       char queue_name[32];
+       int portno;
+
+       sparx5->stats_layout = sparx5_stats_layout;
+       sparx5->num_stats = spx5_stats_count;
+       sparx5->num_ethtool_stats = ARRAY_SIZE(sparx5_stats_layout);
+       sparx5->stats = devm_kcalloc(sparx5->dev,
+                                    SPX5_PORTS_ALL * sparx5->num_stats,
+                                    sizeof(u64), GFP_KERNEL);
+       if (!sparx5->stats)
+               return -ENOMEM;
+
+       mutex_init(&sparx5->queue_stats_lock);
+       sparx5_config_stats(sparx5);
+       for (portno = 0; portno < SPX5_PORTS; portno++)
+               if (sparx5->ports[portno])
+                       sparx5_config_port_stats(sparx5, portno);
+
+       snprintf(queue_name, sizeof(queue_name), "%s-stats",
+                dev_name(sparx5->dev));
+       sparx5->stats_queue = create_singlethread_workqueue(queue_name);
+       if (!sparx5->stats_queue)
+               return -ENOMEM;
+       INIT_DELAYED_WORK(&sparx5->stats_work, sparx5_check_stats_work);
+       queue_delayed_work(sparx5->stats_queue, &sparx5->stats_work,
+                          SPX5_STATS_CHECK_DELAY);
+
+       return 0;
+}
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c b/drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c
new file mode 100644 (file)
index 0000000..0443f66
--- /dev/null
@@ -0,0 +1,500 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip Sparx5 Switch driver
+ *
+ * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#include <net/switchdev.h>
+#include <linux/if_bridge.h>
+#include <linux/iopoll.h>
+
+#include "sparx5_main_regs.h"
+#include "sparx5_main.h"
+
+/* Commands for Mac Table Command register */
+#define MAC_CMD_LEARN         0 /* Insert (Learn) 1 entry */
+#define MAC_CMD_UNLEARN       1 /* Unlearn (Forget) 1 entry */
+#define MAC_CMD_LOOKUP        2 /* Look up 1 entry */
+#define MAC_CMD_READ          3 /* Read entry at Mac Table Index */
+#define MAC_CMD_WRITE         4 /* Write entry at Mac Table Index */
+#define MAC_CMD_SCAN          5 /* Scan (Age or find next) */
+#define MAC_CMD_FIND_SMALLEST 6 /* Get next entry */
+#define MAC_CMD_CLEAR_ALL     7 /* Delete all entries in table */
+
+/* Commands for MAC_ENTRY_ADDR_TYPE */
+#define  MAC_ENTRY_ADDR_TYPE_UPSID_PN         0
+#define  MAC_ENTRY_ADDR_TYPE_UPSID_CPU_OR_INT 1
+#define  MAC_ENTRY_ADDR_TYPE_GLAG             2
+#define  MAC_ENTRY_ADDR_TYPE_MC_IDX           3
+
+#define TABLE_UPDATE_SLEEP_US 10
+#define TABLE_UPDATE_TIMEOUT_US 100000
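+
+/* A MAC table command is complete when MAC_TABLE_ACCESS_SHOT self-clears;
+ * sparx5_mact_wait_for_completion() below polls it every 10 us for up to
+ * 100 ms.
+ */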
+
+struct sparx5_mact_entry {
+       struct list_head list;
+       unsigned char mac[ETH_ALEN];
+       u32 flags;
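+/* MAC_ENT_ALIVE: seen in HW during the last table scan
+ * MAC_ENT_MOVED: HW reports a different port than this SW entry
+ * MAC_ENT_LOCK:  locked/static entry, never aged out
+ */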
+#define MAC_ENT_ALIVE  BIT(0)
+#define MAC_ENT_MOVED  BIT(1)
+#define MAC_ENT_LOCK   BIT(2)
+       u16 vid;
+       u16 port;
+};
+
+static int sparx5_mact_get_status(struct sparx5 *sparx5)
+{
+       return spx5_rd(sparx5, LRN_COMMON_ACCESS_CTRL);
+}
+
+static int sparx5_mact_wait_for_completion(struct sparx5 *sparx5)
+{
+       u32 val;
+
+       return readx_poll_timeout(sparx5_mact_get_status,
+               sparx5, val,
+               LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT_GET(val) == 0,
+               TABLE_UPDATE_SLEEP_US, TABLE_UPDATE_TIMEOUT_US);
+}
+
+static void sparx5_mact_select(struct sparx5 *sparx5,
+                              const unsigned char mac[ETH_ALEN],
+                              u16 vid)
+{
+       u32 macl = 0, mach = 0;
+
+       /* Set the MAC address to handle and its associated VLAN, in the
+        * format understood by the hardware.
+        */
+       mach |= vid    << 16;
+       mach |= mac[0] << 8;
+       mach |= mac[1] << 0;
+       macl |= mac[2] << 24;
+       macl |= mac[3] << 16;
+       macl |= mac[4] << 8;
+       macl |= mac[5] << 0;
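+
+       /* Worked example (illustrative values): mac = 00:11:22:33:44:55 and
+        * vid = 1 gives mach = 0x00010011 and macl = 0x22334455.
+        */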
+
+       spx5_wr(mach, sparx5, LRN_MAC_ACCESS_CFG_0);
+       spx5_wr(macl, sparx5, LRN_MAC_ACCESS_CFG_1);
+}
+
+int sparx5_mact_learn(struct sparx5 *sparx5, int pgid,
+                     const unsigned char mac[ETH_ALEN], u16 vid)
+{
+       int addr, type, ret;
+
+       if (pgid < SPX5_PORTS) {
+               type = MAC_ENTRY_ADDR_TYPE_UPSID_PN;
+               addr = pgid % 32;
+               addr += (pgid / 32) << 5; /* Add upsid */
+       } else {
+               type = MAC_ENTRY_ADDR_TYPE_MC_IDX;
+               addr = pgid - SPX5_PORTS;
+       }
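+
+       /* For a port destination the resulting address equals the port
+        * number: bits 4:0 select the port within a 32-port unit and the
+        * bits above select the unit (UPSID), matching the own-UPSID setup
+        * done in sparx5_start().
+        */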
+
+       mutex_lock(&sparx5->lock);
+
+       sparx5_mact_select(sparx5, mac, vid);
+
+       /* MAC entry properties */
+       spx5_wr(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_SET(addr) |
+               LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_TYPE_SET(type) |
+               LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_VLD_SET(1) |
+               LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_LOCKED_SET(1),
+               sparx5, LRN_MAC_ACCESS_CFG_2);
+       spx5_wr(0, sparx5, LRN_MAC_ACCESS_CFG_3);
+
+       /*  Insert/learn new entry */
+       spx5_wr(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_CMD_SET(MAC_CMD_LEARN) |
+               LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT_SET(1),
+               sparx5, LRN_COMMON_ACCESS_CTRL);
+
+       ret = sparx5_mact_wait_for_completion(sparx5);
+
+       mutex_unlock(&sparx5->lock);
+
+       return ret;
+}
+
+int sparx5_mc_unsync(struct net_device *dev, const unsigned char *addr)
+{
+       struct sparx5_port *port = netdev_priv(dev);
+       struct sparx5 *sparx5 = port->sparx5;
+
+       return sparx5_mact_forget(sparx5, addr, port->pvid);
+}
+
+int sparx5_mc_sync(struct net_device *dev, const unsigned char *addr)
+{
+       struct sparx5_port *port = netdev_priv(dev);
+       struct sparx5 *sparx5 = port->sparx5;
+
+       return sparx5_mact_learn(sparx5, PGID_CPU, addr, port->pvid);
+}
+
+static int sparx5_mact_get(struct sparx5 *sparx5,
+                          unsigned char mac[ETH_ALEN],
+                          u16 *vid, u32 *pcfg2)
+{
+       u32 mach, macl, cfg2;
+       int ret = -ENOENT;
+
+       cfg2 = spx5_rd(sparx5, LRN_MAC_ACCESS_CFG_2);
+       if (LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_VLD_GET(cfg2)) {
+               mach = spx5_rd(sparx5, LRN_MAC_ACCESS_CFG_0);
+               macl = spx5_rd(sparx5, LRN_MAC_ACCESS_CFG_1);
+               mac[0] = ((mach >> 8)  & 0xff);
+               mac[1] = ((mach >> 0)  & 0xff);
+               mac[2] = ((macl >> 24) & 0xff);
+               mac[3] = ((macl >> 16) & 0xff);
+               mac[4] = ((macl >> 8)  & 0xff);
+               mac[5] = ((macl >> 0)  & 0xff);
+               *vid = mach >> 16;
+               *pcfg2 = cfg2;
+               ret = 0;
+       }
+
+       return ret;
+}
+
+bool sparx5_mact_getnext(struct sparx5 *sparx5,
+                        unsigned char mac[ETH_ALEN], u16 *vid, u32 *pcfg2)
+{
+       u32 cfg2;
+       int ret;
+
+       mutex_lock(&sparx5->lock);
+
+       sparx5_mact_select(sparx5, mac, *vid);
+
+       spx5_wr(LRN_SCAN_NEXT_CFG_SCAN_NEXT_IGNORE_LOCKED_ENA_SET(1) |
+               LRN_SCAN_NEXT_CFG_SCAN_NEXT_UNTIL_FOUND_ENA_SET(1),
+               sparx5, LRN_SCAN_NEXT_CFG);
+       spx5_wr(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_CMD_SET
+               (MAC_CMD_FIND_SMALLEST) |
+               LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT_SET(1),
+               sparx5, LRN_COMMON_ACCESS_CTRL);
+
+       ret = sparx5_mact_wait_for_completion(sparx5);
+       if (ret == 0) {
+               ret = sparx5_mact_get(sparx5, mac, vid, &cfg2);
+               if (ret == 0)
+                       *pcfg2 = cfg2;
+       }
+
+       mutex_unlock(&sparx5->lock);
+
+       return ret == 0;
+}
+
+static int sparx5_mact_lookup(struct sparx5 *sparx5,
+                             const unsigned char mac[ETH_ALEN],
+                             u16 vid)
+{
+       int ret;
+
+       mutex_lock(&sparx5->lock);
+
+       sparx5_mact_select(sparx5, mac, vid);
+
+       /* Issue a lookup command */
+       spx5_wr(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_CMD_SET(MAC_CMD_LOOKUP) |
+               LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT_SET(1),
+               sparx5, LRN_COMMON_ACCESS_CTRL);
+
+       ret = sparx5_mact_wait_for_completion(sparx5);
+       if (ret)
+               goto out;
+
+       ret = LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_VLD_GET
+               (spx5_rd(sparx5, LRN_MAC_ACCESS_CFG_2));
+
+out:
+       mutex_unlock(&sparx5->lock);
+
+       return ret;
+}
+
+int sparx5_mact_forget(struct sparx5 *sparx5,
+                      const unsigned char mac[ETH_ALEN], u16 vid)
+{
+       int ret;
+
+       mutex_lock(&sparx5->lock);
+
+       sparx5_mact_select(sparx5, mac, vid);
+
+       /* Issue an unlearn command */
+       spx5_wr(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_CMD_SET(MAC_CMD_UNLEARN) |
+               LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT_SET(1),
+               sparx5, LRN_COMMON_ACCESS_CTRL);
+
+       ret = sparx5_mact_wait_for_completion(sparx5);
+
+       mutex_unlock(&sparx5->lock);
+
+       return ret;
+}
+
+static struct sparx5_mact_entry *alloc_mact_entry(struct sparx5 *sparx5,
+                                                 const unsigned char *mac,
+                                                 u16 vid, u16 port_index)
+{
+       struct sparx5_mact_entry *mact_entry;
+
+       mact_entry = devm_kzalloc(sparx5->dev,
+                                 sizeof(*mact_entry), GFP_ATOMIC);
+       if (!mact_entry)
+               return NULL;
+
+       memcpy(mact_entry->mac, mac, ETH_ALEN);
+       mact_entry->vid = vid;
+       mact_entry->port = port_index;
+       return mact_entry;
+}
+
+static struct sparx5_mact_entry *find_mact_entry(struct sparx5 *sparx5,
+                                                const unsigned char *mac,
+                                                u16 vid, u16 port_index)
+{
+       struct sparx5_mact_entry *mact_entry;
+       struct sparx5_mact_entry *res = NULL;
+
+       mutex_lock(&sparx5->mact_lock);
+       list_for_each_entry(mact_entry, &sparx5->mact_entries, list) {
+               if (mact_entry->vid == vid &&
+                   ether_addr_equal(mac, mact_entry->mac) &&
+                   mact_entry->port == port_index) {
+                       res = mact_entry;
+                       break;
+               }
+       }
+       mutex_unlock(&sparx5->mact_lock);
+
+       return res;
+}
+
+static void sparx5_fdb_call_notifiers(enum switchdev_notifier_type type,
+                                     const char *mac, u16 vid,
+                                     struct net_device *dev, bool offloaded)
+{
+       struct switchdev_notifier_fdb_info info;
+
+       info.addr = mac;
+       info.vid = vid;
+       info.offloaded = offloaded;
+       call_switchdev_notifiers(type, dev, &info.info, NULL);
+}
+
+int sparx5_add_mact_entry(struct sparx5 *sparx5,
+                         struct sparx5_port *port,
+                         const unsigned char *addr, u16 vid)
+{
+       struct sparx5_mact_entry *mact_entry;
+       int ret;
+
+       ret = sparx5_mact_lookup(sparx5, addr, vid);
+       if (ret)
+               return 0;
+
+       /* In case the entry already exists, don't add it again to SW,
+        * just update HW.  We still have to check the SW list, because an
+        * entry may have been learned by HW before the MACT worker ran:
+        * the frame reaches the CPU first and the CPU adds the entry, but
+        * without the extern_learn flag.
+        */
+       mact_entry = find_mact_entry(sparx5, addr, vid, port->portno);
+       if (mact_entry)
+               goto update_hw;
+
+       /* Add the entry to the SW MAC table, so that we do not notify the
+        * bridge again when SW pulls the HW table.
+        */
+       mact_entry = alloc_mact_entry(sparx5, addr, vid, port->portno);
+       if (!mact_entry)
+               return -ENOMEM;
+
+       mutex_lock(&sparx5->mact_lock);
+       list_add_tail(&mact_entry->list, &sparx5->mact_entries);
+       mutex_unlock(&sparx5->mact_lock);
+
+update_hw:
+       ret = sparx5_mact_learn(sparx5, port->portno, addr, vid);
+
+       /* New entry? */
+       if (mact_entry->flags == 0) {
+               mact_entry->flags |= MAC_ENT_LOCK; /* Don't age this */
+               sparx5_fdb_call_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE, addr, vid,
+                                         port->ndev, true);
+       }
+
+       return ret;
+}
+
+int sparx5_del_mact_entry(struct sparx5 *sparx5,
+                         const unsigned char *addr,
+                         u16 vid)
+{
+       struct sparx5_mact_entry *mact_entry, *tmp;
+
+       /* Remove the entry from the SW MAC table, so that we do not notify
+        * the bridge again when SW pulls the HW table.
+        */
+       mutex_lock(&sparx5->mact_lock);
+       list_for_each_entry_safe(mact_entry, tmp, &sparx5->mact_entries,
+                                list) {
+               if ((vid == 0 || mact_entry->vid == vid) &&
+                   ether_addr_equal(addr, mact_entry->mac)) {
+                       sparx5_mact_forget(sparx5, addr, mact_entry->vid);
+
+                       list_del(&mact_entry->list);
+                       devm_kfree(sparx5->dev, mact_entry);
+               }
+       }
+       mutex_unlock(&sparx5->mact_lock);
+
+       return 0;
+}
+
+static void sparx5_mact_handle_entry(struct sparx5 *sparx5,
+                                    unsigned char mac[ETH_ALEN],
+                                    u16 vid, u32 cfg2)
+{
+       struct sparx5_mact_entry *mact_entry;
+       bool found = false;
+       u16 port;
+
+       if (LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_TYPE_GET(cfg2) !=
+           MAC_ENTRY_ADDR_TYPE_UPSID_PN)
+               return;
+
+       port = LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_GET(cfg2);
+       if (port >= SPX5_PORTS)
+               return;
+
+       if (!test_bit(port, sparx5->bridge_mask))
+               return;
+
+       mutex_lock(&sparx5->mact_lock);
+       list_for_each_entry(mact_entry, &sparx5->mact_entries, list) {
+               if (mact_entry->vid == vid &&
+                   ether_addr_equal(mac, mact_entry->mac)) {
+                       found = true;
+                       mact_entry->flags |= MAC_ENT_ALIVE;
+                       if (mact_entry->port != port) {
+                               dev_warn(sparx5->dev, "Entry move: %d -> %d\n",
+                                        mact_entry->port, port);
+                               mact_entry->port = port;
+                               mact_entry->flags |= MAC_ENT_MOVED;
+                       }
+                       /* Entry handled */
+                       break;
+               }
+       }
+       mutex_unlock(&sparx5->mact_lock);
+
+       if (found && !(mact_entry->flags & MAC_ENT_MOVED))
+               /* Present, not moved */
+               return;
+
+       if (!found) {
+               /* Entry not found - now add */
+               mact_entry = alloc_mact_entry(sparx5, mac, vid, port);
+               if (!mact_entry)
+                       return;
+
+               mact_entry->flags |= MAC_ENT_ALIVE;
+               mutex_lock(&sparx5->mact_lock);
+               list_add_tail(&mact_entry->list, &sparx5->mact_entries);
+               mutex_unlock(&sparx5->mact_lock);
+       }
+
+       /* New or moved entry - notify bridge */
+       sparx5_fdb_call_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE,
+                                 mac, vid, sparx5->ports[port]->ndev,
+                                 true);
+}
+
+void sparx5_mact_pull_work(struct work_struct *work)
+{
+       struct delayed_work *del_work = to_delayed_work(work);
+       struct sparx5 *sparx5 = container_of(del_work, struct sparx5,
+                                            mact_work);
+       struct sparx5_mact_entry *mact_entry, *tmp;
+       unsigned char mac[ETH_ALEN];
+       u32 cfg2;
+       u16 vid;
+       int ret;
+
+       /* Reset MAC entry flags */
+       mutex_lock(&sparx5->mact_lock);
+       list_for_each_entry(mact_entry, &sparx5->mact_entries, list)
+               mact_entry->flags &= MAC_ENT_LOCK;
+       mutex_unlock(&sparx5->mact_lock);
+
+       /* Main MAC address processing loop: walk the entire HW MAC table
+        * by repeatedly requesting the smallest (vid, mac) entry greater
+        * than the previous one.
+        */
+       vid = 0;
+       memset(mac, 0, sizeof(mac));
+       do {
+               mutex_lock(&sparx5->lock);
+               sparx5_mact_select(sparx5, mac, vid);
+               spx5_wr(LRN_SCAN_NEXT_CFG_SCAN_NEXT_UNTIL_FOUND_ENA_SET(1),
+                       sparx5, LRN_SCAN_NEXT_CFG);
+               spx5_wr(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_CMD_SET
+                       (MAC_CMD_FIND_SMALLEST) |
+                       LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT_SET(1),
+                       sparx5, LRN_COMMON_ACCESS_CTRL);
+               ret = sparx5_mact_wait_for_completion(sparx5);
+               if (ret == 0)
+                       ret = sparx5_mact_get(sparx5, mac, &vid, &cfg2);
+               mutex_unlock(&sparx5->lock);
+               if (ret == 0)
+                       sparx5_mact_handle_entry(sparx5, mac, vid, cfg2);
+       } while (ret == 0);
+
+       mutex_lock(&sparx5->mact_lock);
+       list_for_each_entry_safe(mact_entry, tmp, &sparx5->mact_entries,
+                                list) {
+               /* If the entry is in HW or permanent, then skip */
+               if (mact_entry->flags & (MAC_ENT_ALIVE | MAC_ENT_LOCK))
+                       continue;
+
+               sparx5_fdb_call_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE,
+                                         mact_entry->mac, mact_entry->vid,
+                                         sparx5->ports[mact_entry->port]->ndev,
+                                         true);
+
+               list_del(&mact_entry->list);
+               devm_kfree(sparx5->dev, mact_entry);
+       }
+       mutex_unlock(&sparx5->mact_lock);
+
+       queue_delayed_work(sparx5->mact_queue, &sparx5->mact_work,
+                          SPX5_MACT_PULL_DELAY);
+}
+
+void sparx5_set_ageing(struct sparx5 *sparx5, int msecs)
+{
+       int value = max(1, msecs / 10); /* unit 10 ms */
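+
+       /* E.g. msecs = 300000 (the bridge default of 300 s) gives
+        * value = 30000; with one-bit ageing an entry is removed after two
+        * scan periods, so PERIOD_VAL is programmed as value / 2 = 15000
+        * units of 10 ms (150 s per scan).
+        */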
+
+       spx5_rmw(LRN_AUTOAGE_CFG_UNIT_SIZE_SET(2) | /* 10 ms */
+                LRN_AUTOAGE_CFG_PERIOD_VAL_SET(value / 2), /* one bit ageing */
+                LRN_AUTOAGE_CFG_UNIT_SIZE |
+                LRN_AUTOAGE_CFG_PERIOD_VAL,
+                sparx5,
+                LRN_AUTOAGE_CFG(0));
+}
+
+void sparx5_mact_init(struct sparx5 *sparx5)
+{
+       mutex_init(&sparx5->lock);
+
+       /*  Flush MAC table */
+       spx5_wr(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_CMD_SET(MAC_CMD_CLEAR_ALL) |
+               LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT_SET(1),
+               sparx5, LRN_COMMON_ACCESS_CTRL);
+
+       if (sparx5_mact_wait_for_completion(sparx5) != 0)
+               dev_warn(sparx5->dev, "MAC flush error\n");
+
+       sparx5_set_ageing(sparx5, BR_DEFAULT_AGEING_TIME / HZ * 1000);
+}
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
new file mode 100644 (file)
index 0000000..a325f7c
--- /dev/null
@@ -0,0 +1,852 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip Sparx5 Switch driver
+ *
+ * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
+ *
+ * The Sparx5 Chip Register Model can be browsed at this location:
+ * https://github.com/microchip-ung/sparx-5_reginfo
+ */
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/netdevice.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/of.h>
+#include <linux/of_net.h>
+#include <linux/of_mdio.h>
+#include <net/switchdev.h>
+#include <linux/etherdevice.h>
+#include <linux/io.h>
+#include <linux/printk.h>
+#include <linux/iopoll.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+#include <linux/types.h>
+#include <linux/reset.h>
+
+#include "sparx5_main_regs.h"
+#include "sparx5_main.h"
+#include "sparx5_port.h"
+
+#define QLIM_WM(fraction) \
+       ((SPX5_BUFFER_MEMORY / SPX5_BUFFER_CELL_SZ - 100) * (fraction) / 100)
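+/* E.g. QLIM_WM(80) = (4194280 / 184 - 100) * 80 / 100 = 18156 cells,
+ * i.e. 80% of the buffer memory less a 100-cell headroom.
+ */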
+#define IO_RANGES 3
+
+struct initial_port_config {
+       u32 portno;
+       struct device_node *node;
+       struct sparx5_port_config conf;
+       struct phy *serdes;
+};
+
+struct sparx5_ram_config {
+       void __iomem *init_reg;
+       u32 init_val;
+};
+
+struct sparx5_main_io_resource {
+       enum sparx5_target id;
+       phys_addr_t offset;
+       int range;
+};
+
+static const struct sparx5_main_io_resource sparx5_main_iomap[] =  {
+       { TARGET_CPU,                         0, 0 }, /* 0x600000000 */
+       { TARGET_FDMA,                  0x80000, 0 }, /* 0x600080000 */
+       { TARGET_PCEP,                 0x400000, 0 }, /* 0x600400000 */
+       { TARGET_DEV2G5,             0x10004000, 1 }, /* 0x610004000 */
+       { TARGET_DEV5G,              0x10008000, 1 }, /* 0x610008000 */
+       { TARGET_PCS5G_BR,           0x1000c000, 1 }, /* 0x61000c000 */
+       { TARGET_DEV2G5 +  1,        0x10010000, 1 }, /* 0x610010000 */
+       { TARGET_DEV5G +  1,         0x10014000, 1 }, /* 0x610014000 */
+       { TARGET_PCS5G_BR +  1,      0x10018000, 1 }, /* 0x610018000 */
+       { TARGET_DEV2G5 +  2,        0x1001c000, 1 }, /* 0x61001c000 */
+       { TARGET_DEV5G +  2,         0x10020000, 1 }, /* 0x610020000 */
+       { TARGET_PCS5G_BR +  2,      0x10024000, 1 }, /* 0x610024000 */
+       { TARGET_DEV2G5 +  6,        0x10028000, 1 }, /* 0x610028000 */
+       { TARGET_DEV5G +  6,         0x1002c000, 1 }, /* 0x61002c000 */
+       { TARGET_PCS5G_BR +  6,      0x10030000, 1 }, /* 0x610030000 */
+       { TARGET_DEV2G5 +  7,        0x10034000, 1 }, /* 0x610034000 */
+       { TARGET_DEV5G +  7,         0x10038000, 1 }, /* 0x610038000 */
+       { TARGET_PCS5G_BR +  7,      0x1003c000, 1 }, /* 0x61003c000 */
+       { TARGET_DEV2G5 +  8,        0x10040000, 1 }, /* 0x610040000 */
+       { TARGET_DEV5G +  8,         0x10044000, 1 }, /* 0x610044000 */
+       { TARGET_PCS5G_BR +  8,      0x10048000, 1 }, /* 0x610048000 */
+       { TARGET_DEV2G5 +  9,        0x1004c000, 1 }, /* 0x61004c000 */
+       { TARGET_DEV5G +  9,         0x10050000, 1 }, /* 0x610050000 */
+       { TARGET_PCS5G_BR +  9,      0x10054000, 1 }, /* 0x610054000 */
+       { TARGET_DEV2G5 + 10,        0x10058000, 1 }, /* 0x610058000 */
+       { TARGET_DEV5G + 10,         0x1005c000, 1 }, /* 0x61005c000 */
+       { TARGET_PCS5G_BR + 10,      0x10060000, 1 }, /* 0x610060000 */
+       { TARGET_DEV2G5 + 11,        0x10064000, 1 }, /* 0x610064000 */
+       { TARGET_DEV5G + 11,         0x10068000, 1 }, /* 0x610068000 */
+       { TARGET_PCS5G_BR + 11,      0x1006c000, 1 }, /* 0x61006c000 */
+       { TARGET_DEV2G5 + 12,        0x10070000, 1 }, /* 0x610070000 */
+       { TARGET_DEV10G,             0x10074000, 1 }, /* 0x610074000 */
+       { TARGET_PCS10G_BR,          0x10078000, 1 }, /* 0x610078000 */
+       { TARGET_DEV2G5 + 14,        0x1007c000, 1 }, /* 0x61007c000 */
+       { TARGET_DEV10G +  2,        0x10080000, 1 }, /* 0x610080000 */
+       { TARGET_PCS10G_BR +  2,     0x10084000, 1 }, /* 0x610084000 */
+       { TARGET_DEV2G5 + 15,        0x10088000, 1 }, /* 0x610088000 */
+       { TARGET_DEV10G +  3,        0x1008c000, 1 }, /* 0x61008c000 */
+       { TARGET_PCS10G_BR +  3,     0x10090000, 1 }, /* 0x610090000 */
+       { TARGET_DEV2G5 + 16,        0x10094000, 1 }, /* 0x610094000 */
+       { TARGET_DEV2G5 + 17,        0x10098000, 1 }, /* 0x610098000 */
+       { TARGET_DEV2G5 + 18,        0x1009c000, 1 }, /* 0x61009c000 */
+       { TARGET_DEV2G5 + 19,        0x100a0000, 1 }, /* 0x6100a0000 */
+       { TARGET_DEV2G5 + 20,        0x100a4000, 1 }, /* 0x6100a4000 */
+       { TARGET_DEV2G5 + 21,        0x100a8000, 1 }, /* 0x6100a8000 */
+       { TARGET_DEV2G5 + 22,        0x100ac000, 1 }, /* 0x6100ac000 */
+       { TARGET_DEV2G5 + 23,        0x100b0000, 1 }, /* 0x6100b0000 */
+       { TARGET_DEV2G5 + 32,        0x100b4000, 1 }, /* 0x6100b4000 */
+       { TARGET_DEV2G5 + 33,        0x100b8000, 1 }, /* 0x6100b8000 */
+       { TARGET_DEV2G5 + 34,        0x100bc000, 1 }, /* 0x6100bc000 */
+       { TARGET_DEV2G5 + 35,        0x100c0000, 1 }, /* 0x6100c0000 */
+       { TARGET_DEV2G5 + 36,        0x100c4000, 1 }, /* 0x6100c4000 */
+       { TARGET_DEV2G5 + 37,        0x100c8000, 1 }, /* 0x6100c8000 */
+       { TARGET_DEV2G5 + 38,        0x100cc000, 1 }, /* 0x6100cc000 */
+       { TARGET_DEV2G5 + 39,        0x100d0000, 1 }, /* 0x6100d0000 */
+       { TARGET_DEV2G5 + 40,        0x100d4000, 1 }, /* 0x6100d4000 */
+       { TARGET_DEV2G5 + 41,        0x100d8000, 1 }, /* 0x6100d8000 */
+       { TARGET_DEV2G5 + 42,        0x100dc000, 1 }, /* 0x6100dc000 */
+       { TARGET_DEV2G5 + 43,        0x100e0000, 1 }, /* 0x6100e0000 */
+       { TARGET_DEV2G5 + 44,        0x100e4000, 1 }, /* 0x6100e4000 */
+       { TARGET_DEV2G5 + 45,        0x100e8000, 1 }, /* 0x6100e8000 */
+       { TARGET_DEV2G5 + 46,        0x100ec000, 1 }, /* 0x6100ec000 */
+       { TARGET_DEV2G5 + 47,        0x100f0000, 1 }, /* 0x6100f0000 */
+       { TARGET_DEV2G5 + 57,        0x100f4000, 1 }, /* 0x6100f4000 */
+       { TARGET_DEV25G +  1,        0x100f8000, 1 }, /* 0x6100f8000 */
+       { TARGET_PCS25G_BR +  1,     0x100fc000, 1 }, /* 0x6100fc000 */
+       { TARGET_DEV2G5 + 59,        0x10104000, 1 }, /* 0x610104000 */
+       { TARGET_DEV25G +  3,        0x10108000, 1 }, /* 0x610108000 */
+       { TARGET_PCS25G_BR +  3,     0x1010c000, 1 }, /* 0x61010c000 */
+       { TARGET_DEV2G5 + 60,        0x10114000, 1 }, /* 0x610114000 */
+       { TARGET_DEV25G +  4,        0x10118000, 1 }, /* 0x610118000 */
+       { TARGET_PCS25G_BR +  4,     0x1011c000, 1 }, /* 0x61011c000 */
+       { TARGET_DEV2G5 + 64,        0x10124000, 1 }, /* 0x610124000 */
+       { TARGET_DEV5G + 12,         0x10128000, 1 }, /* 0x610128000 */
+       { TARGET_PCS5G_BR + 12,      0x1012c000, 1 }, /* 0x61012c000 */
+       { TARGET_PORT_CONF,          0x10130000, 1 }, /* 0x610130000 */
+       { TARGET_DEV2G5 +  3,        0x10404000, 1 }, /* 0x610404000 */
+       { TARGET_DEV5G +  3,         0x10408000, 1 }, /* 0x610408000 */
+       { TARGET_PCS5G_BR +  3,      0x1040c000, 1 }, /* 0x61040c000 */
+       { TARGET_DEV2G5 +  4,        0x10410000, 1 }, /* 0x610410000 */
+       { TARGET_DEV5G +  4,         0x10414000, 1 }, /* 0x610414000 */
+       { TARGET_PCS5G_BR +  4,      0x10418000, 1 }, /* 0x610418000 */
+       { TARGET_DEV2G5 +  5,        0x1041c000, 1 }, /* 0x61041c000 */
+       { TARGET_DEV5G +  5,         0x10420000, 1 }, /* 0x610420000 */
+       { TARGET_PCS5G_BR +  5,      0x10424000, 1 }, /* 0x610424000 */
+       { TARGET_DEV2G5 + 13,        0x10428000, 1 }, /* 0x610428000 */
+       { TARGET_DEV10G +  1,        0x1042c000, 1 }, /* 0x61042c000 */
+       { TARGET_PCS10G_BR +  1,     0x10430000, 1 }, /* 0x610430000 */
+       { TARGET_DEV2G5 + 24,        0x10434000, 1 }, /* 0x610434000 */
+       { TARGET_DEV2G5 + 25,        0x10438000, 1 }, /* 0x610438000 */
+       { TARGET_DEV2G5 + 26,        0x1043c000, 1 }, /* 0x61043c000 */
+       { TARGET_DEV2G5 + 27,        0x10440000, 1 }, /* 0x610440000 */
+       { TARGET_DEV2G5 + 28,        0x10444000, 1 }, /* 0x610444000 */
+       { TARGET_DEV2G5 + 29,        0x10448000, 1 }, /* 0x610448000 */
+       { TARGET_DEV2G5 + 30,        0x1044c000, 1 }, /* 0x61044c000 */
+       { TARGET_DEV2G5 + 31,        0x10450000, 1 }, /* 0x610450000 */
+       { TARGET_DEV2G5 + 48,        0x10454000, 1 }, /* 0x610454000 */
+       { TARGET_DEV10G +  4,        0x10458000, 1 }, /* 0x610458000 */
+       { TARGET_PCS10G_BR +  4,     0x1045c000, 1 }, /* 0x61045c000 */
+       { TARGET_DEV2G5 + 49,        0x10460000, 1 }, /* 0x610460000 */
+       { TARGET_DEV10G +  5,        0x10464000, 1 }, /* 0x610464000 */
+       { TARGET_PCS10G_BR +  5,     0x10468000, 1 }, /* 0x610468000 */
+       { TARGET_DEV2G5 + 50,        0x1046c000, 1 }, /* 0x61046c000 */
+       { TARGET_DEV10G +  6,        0x10470000, 1 }, /* 0x610470000 */
+       { TARGET_PCS10G_BR +  6,     0x10474000, 1 }, /* 0x610474000 */
+       { TARGET_DEV2G5 + 51,        0x10478000, 1 }, /* 0x610478000 */
+       { TARGET_DEV10G +  7,        0x1047c000, 1 }, /* 0x61047c000 */
+       { TARGET_PCS10G_BR +  7,     0x10480000, 1 }, /* 0x610480000 */
+       { TARGET_DEV2G5 + 52,        0x10484000, 1 }, /* 0x610484000 */
+       { TARGET_DEV10G +  8,        0x10488000, 1 }, /* 0x610488000 */
+       { TARGET_PCS10G_BR +  8,     0x1048c000, 1 }, /* 0x61048c000 */
+       { TARGET_DEV2G5 + 53,        0x10490000, 1 }, /* 0x610490000 */
+       { TARGET_DEV10G +  9,        0x10494000, 1 }, /* 0x610494000 */
+       { TARGET_PCS10G_BR +  9,     0x10498000, 1 }, /* 0x610498000 */
+       { TARGET_DEV2G5 + 54,        0x1049c000, 1 }, /* 0x61049c000 */
+       { TARGET_DEV10G + 10,        0x104a0000, 1 }, /* 0x6104a0000 */
+       { TARGET_PCS10G_BR + 10,     0x104a4000, 1 }, /* 0x6104a4000 */
+       { TARGET_DEV2G5 + 55,        0x104a8000, 1 }, /* 0x6104a8000 */
+       { TARGET_DEV10G + 11,        0x104ac000, 1 }, /* 0x6104ac000 */
+       { TARGET_PCS10G_BR + 11,     0x104b0000, 1 }, /* 0x6104b0000 */
+       { TARGET_DEV2G5 + 56,        0x104b4000, 1 }, /* 0x6104b4000 */
+       { TARGET_DEV25G,             0x104b8000, 1 }, /* 0x6104b8000 */
+       { TARGET_PCS25G_BR,          0x104bc000, 1 }, /* 0x6104bc000 */
+       { TARGET_DEV2G5 + 58,        0x104c4000, 1 }, /* 0x6104c4000 */
+       { TARGET_DEV25G +  2,        0x104c8000, 1 }, /* 0x6104c8000 */
+       { TARGET_PCS25G_BR +  2,     0x104cc000, 1 }, /* 0x6104cc000 */
+       { TARGET_DEV2G5 + 61,        0x104d4000, 1 }, /* 0x6104d4000 */
+       { TARGET_DEV25G +  5,        0x104d8000, 1 }, /* 0x6104d8000 */
+       { TARGET_PCS25G_BR +  5,     0x104dc000, 1 }, /* 0x6104dc000 */
+       { TARGET_DEV2G5 + 62,        0x104e4000, 1 }, /* 0x6104e4000 */
+       { TARGET_DEV25G +  6,        0x104e8000, 1 }, /* 0x6104e8000 */
+       { TARGET_PCS25G_BR +  6,     0x104ec000, 1 }, /* 0x6104ec000 */
+       { TARGET_DEV2G5 + 63,        0x104f4000, 1 }, /* 0x6104f4000 */
+       { TARGET_DEV25G +  7,        0x104f8000, 1 }, /* 0x6104f8000 */
+       { TARGET_PCS25G_BR +  7,     0x104fc000, 1 }, /* 0x6104fc000 */
+       { TARGET_DSM,                0x10504000, 1 }, /* 0x610504000 */
+       { TARGET_ASM,                0x10600000, 1 }, /* 0x610600000 */
+       { TARGET_GCB,                0x11010000, 2 }, /* 0x611010000 */
+       { TARGET_QS,                 0x11030000, 2 }, /* 0x611030000 */
+       { TARGET_ANA_ACL,            0x11050000, 2 }, /* 0x611050000 */
+       { TARGET_LRN,                0x11060000, 2 }, /* 0x611060000 */
+       { TARGET_VCAP_SUPER,         0x11080000, 2 }, /* 0x611080000 */
+       { TARGET_QSYS,               0x110a0000, 2 }, /* 0x6110a0000 */
+       { TARGET_QFWD,               0x110b0000, 2 }, /* 0x6110b0000 */
+       { TARGET_XQS,                0x110c0000, 2 }, /* 0x6110c0000 */
+       { TARGET_CLKGEN,             0x11100000, 2 }, /* 0x611100000 */
+       { TARGET_ANA_AC_POL,         0x11200000, 2 }, /* 0x611200000 */
+       { TARGET_QRES,               0x11280000, 2 }, /* 0x611280000 */
+       { TARGET_EACL,               0x112c0000, 2 }, /* 0x6112c0000 */
+       { TARGET_ANA_CL,             0x11400000, 2 }, /* 0x611400000 */
+       { TARGET_ANA_L3,             0x11480000, 2 }, /* 0x611480000 */
+       { TARGET_HSCH,               0x11580000, 2 }, /* 0x611580000 */
+       { TARGET_REW,                0x11600000, 2 }, /* 0x611600000 */
+       { TARGET_ANA_L2,             0x11800000, 2 }, /* 0x611800000 */
+       { TARGET_ANA_AC,             0x11900000, 2 }, /* 0x611900000 */
+       { TARGET_VOP,                0x11a00000, 2 }, /* 0x611a00000 */
+};
+
+static int sparx5_create_targets(struct sparx5 *sparx5)
+{
+       struct resource *iores[IO_RANGES];
+       void __iomem *iomem[IO_RANGES];
+       void __iomem *begin[IO_RANGES];
+       int range_id[IO_RANGES];
+       int idx, jdx;
+
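+       /* Locate the first iomap entry of each range; its offset is used
+        * below to translate the start of the ioremap'ed window back to a
+        * common base, so that every target address becomes base + offset.
+        */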
+       for (idx = 0, jdx = 0; jdx < ARRAY_SIZE(sparx5_main_iomap); jdx++) {
+               const struct sparx5_main_io_resource *iomap = &sparx5_main_iomap[jdx];
+
+               if (idx == iomap->range) {
+                       range_id[idx] = jdx;
+                       idx++;
+               }
+       }
+       for (idx = 0; idx < IO_RANGES; idx++) {
+               iores[idx] = platform_get_resource(sparx5->pdev, IORESOURCE_MEM,
+                                                  idx);
+               if (!iores[idx]) {
+                       dev_err(sparx5->dev, "Invalid memory resource %d\n",
+                               idx);
+                       return -EINVAL;
+               }
+               iomem[idx] = devm_ioremap(sparx5->dev,
+                                         iores[idx]->start,
+                                         resource_size(iores[idx]));
+               if (!iomem[idx]) {
+                       dev_err(sparx5->dev, "Unable to get switch registers: %s\n",
+                               iores[idx]->name);
+                       return -ENOMEM;
+               }
+               begin[idx] = iomem[idx] - sparx5_main_iomap[range_id[idx]].offset;
+       }
+       for (jdx = 0; jdx < ARRAY_SIZE(sparx5_main_iomap); jdx++) {
+               const struct sparx5_main_io_resource *iomap = &sparx5_main_iomap[jdx];
+
+               sparx5->regs[iomap->id] = begin[iomap->range] + iomap->offset;
+       }
+       return 0;
+}
+
+static int sparx5_create_port(struct sparx5 *sparx5,
+                             struct initial_port_config *config)
+{
+       struct sparx5_port *spx5_port;
+       struct net_device *ndev;
+       struct phylink *phylink;
+       int err;
+
+       ndev = sparx5_create_netdev(sparx5, config->portno);
+       if (IS_ERR(ndev)) {
+               dev_err(sparx5->dev, "Could not create net device: %02u\n",
+                       config->portno);
+               return PTR_ERR(ndev);
+       }
+       spx5_port = netdev_priv(ndev);
+       spx5_port->of_node = config->node;
+       spx5_port->serdes = config->serdes;
+       spx5_port->pvid = NULL_VID;
+       spx5_port->signd_internal = true;
+       spx5_port->signd_active_high = true;
+       spx5_port->signd_enable = true;
+       spx5_port->max_vlan_tags = SPX5_PORT_MAX_TAGS_NONE;
+       spx5_port->vlan_type = SPX5_VLAN_PORT_TYPE_UNAWARE;
+       spx5_port->custom_etype = 0x8880; /* Vitesse */
+       spx5_port->phylink_pcs.poll = true;
+       spx5_port->phylink_pcs.ops = &sparx5_phylink_pcs_ops;
+       sparx5->ports[config->portno] = spx5_port;
+
+       err = sparx5_port_init(sparx5, spx5_port, &config->conf);
+       if (err) {
+               dev_err(sparx5->dev, "port init failed\n");
+               return err;
+       }
+       spx5_port->conf = config->conf;
+
+       /* Setup VLAN */
+       sparx5_vlan_port_setup(sparx5, spx5_port->portno);
+
+       /* Create a phylink for PHY management.  Also handles SFPs */
+       spx5_port->phylink_config.dev = &spx5_port->ndev->dev;
+       spx5_port->phylink_config.type = PHYLINK_NETDEV;
+       spx5_port->phylink_config.pcs_poll = true;
+
+       phylink = phylink_create(&spx5_port->phylink_config,
+                                of_fwnode_handle(config->node),
+                                config->conf.phy_mode,
+                                &sparx5_phylink_mac_ops);
+       if (IS_ERR(phylink))
+               return PTR_ERR(phylink);
+
+       spx5_port->phylink = phylink;
+       phylink_set_pcs(phylink, &spx5_port->phylink_pcs);
+
+       return 0;
+}
+
+static int sparx5_init_ram(struct sparx5 *s5)
+{
+       const struct sparx5_ram_config spx5_ram_cfg[] = {
+               {spx5_reg_get(s5, ANA_AC_STAT_RESET), ANA_AC_STAT_RESET_RESET},
+               {spx5_reg_get(s5, ASM_STAT_CFG), ASM_STAT_CFG_STAT_CNT_CLR_SHOT},
+               {spx5_reg_get(s5, QSYS_RAM_INIT), QSYS_RAM_INIT_RAM_INIT},
+               {spx5_reg_get(s5, REW_RAM_INIT), QSYS_RAM_INIT_RAM_INIT},
+               {spx5_reg_get(s5, VOP_RAM_INIT), QSYS_RAM_INIT_RAM_INIT},
+               {spx5_reg_get(s5, ANA_AC_RAM_INIT), QSYS_RAM_INIT_RAM_INIT},
+               {spx5_reg_get(s5, ASM_RAM_INIT), QSYS_RAM_INIT_RAM_INIT},
+               {spx5_reg_get(s5, EACL_RAM_INIT), QSYS_RAM_INIT_RAM_INIT},
+               {spx5_reg_get(s5, VCAP_SUPER_RAM_INIT), QSYS_RAM_INIT_RAM_INIT},
+               {spx5_reg_get(s5, DSM_RAM_INIT), QSYS_RAM_INIT_RAM_INIT}
+       };
+       const struct sparx5_ram_config *cfg;
+       u32 value, pending, jdx, idx;
+
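+       /* Pass 0 starts initialization in every block; the following
+        * passes poll until the init bits have self-cleared, sleeping
+        * 1-2 ms between passes (roughly 20 ms worst case).
+        */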
+       for (jdx = 0; jdx < 10; jdx++) {
+               pending = ARRAY_SIZE(spx5_ram_cfg);
+               for (idx = 0; idx < ARRAY_SIZE(spx5_ram_cfg); idx++) {
+                       cfg = &spx5_ram_cfg[idx];
+                       if (jdx == 0) {
+                               writel(cfg->init_val, cfg->init_reg);
+                       } else {
+                               value = readl(cfg->init_reg);
+                               if ((value & cfg->init_val) != cfg->init_val)
+                                       pending--;
+                       }
+               }
+               if (!pending)
+                       break;
+               usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
+       }
+
+       if (pending > 0) {
+               /* Still initializing, should be complete in
+                * less than 1ms
+                */
+               dev_err(s5->dev, "Memory initialization error\n");
+               return -EINVAL;
+       }
+       return 0;
+}
+
+static int sparx5_init_switchcore(struct sparx5 *sparx5)
+{
+       u32 value;
+       int err = 0;
+
+       spx5_rmw(EACL_POL_EACL_CFG_EACL_FORCE_INIT_SET(1),
+                EACL_POL_EACL_CFG_EACL_FORCE_INIT,
+                sparx5,
+                EACL_POL_EACL_CFG);
+
+       spx5_rmw(EACL_POL_EACL_CFG_EACL_FORCE_INIT_SET(0),
+                EACL_POL_EACL_CFG_EACL_FORCE_INIT,
+                sparx5,
+                EACL_POL_EACL_CFG);
+
+       /* Initialize memories, if not done already */
+       value = spx5_rd(sparx5, HSCH_RESET_CFG);
+       if (!(value & HSCH_RESET_CFG_CORE_ENA)) {
+               err = sparx5_init_ram(sparx5);
+               if (err)
+                       return err;
+       }
+
+       /* Reset counters */
+       spx5_wr(ANA_AC_STAT_RESET_RESET_SET(1), sparx5, ANA_AC_STAT_RESET);
+       spx5_wr(ASM_STAT_CFG_STAT_CNT_CLR_SHOT_SET(1), sparx5, ASM_STAT_CFG);
+
+       /* Enable switch-core and queue system */
+       spx5_wr(HSCH_RESET_CFG_CORE_ENA_SET(1), sparx5, HSCH_RESET_CFG);
+
+       return 0;
+}
+
+static int sparx5_init_coreclock(struct sparx5 *sparx5)
+{
+       enum sparx5_core_clockfreq freq = sparx5->coreclock;
+       u32 clk_div, clk_period, pol_upd_int, idx;
+
+       /* Verify that the core clock frequency is supported on the target.
+        * With 'SPX5_CORE_CLOCK_DEFAULT' the highest supported frequency
+        * is used.
+        */
+       switch (sparx5->target_ct) {
+       case SPX5_TARGET_CT_7546:
+               if (sparx5->coreclock == SPX5_CORE_CLOCK_DEFAULT)
+                       freq = SPX5_CORE_CLOCK_250MHZ;
+               else if (sparx5->coreclock != SPX5_CORE_CLOCK_250MHZ)
+                       freq = 0; /* Not supported */
+               break;
+       case SPX5_TARGET_CT_7549:
+       case SPX5_TARGET_CT_7552:
+       case SPX5_TARGET_CT_7556:
+               if (sparx5->coreclock == SPX5_CORE_CLOCK_DEFAULT)
+                       freq = SPX5_CORE_CLOCK_500MHZ;
+               else if (sparx5->coreclock != SPX5_CORE_CLOCK_500MHZ)
+                       freq = 0; /* Not supported */
+               break;
+       case SPX5_TARGET_CT_7558:
+       case SPX5_TARGET_CT_7558TSN:
+               if (sparx5->coreclock == SPX5_CORE_CLOCK_DEFAULT)
+                       freq = SPX5_CORE_CLOCK_625MHZ;
+               else if (sparx5->coreclock != SPX5_CORE_CLOCK_625MHZ)
+                       freq = 0; /* Not supported */
+               break;
+       case SPX5_TARGET_CT_7546TSN:
+               if (sparx5->coreclock == SPX5_CORE_CLOCK_DEFAULT)
+                       freq = SPX5_CORE_CLOCK_625MHZ;
+               break;
+       case SPX5_TARGET_CT_7549TSN:
+       case SPX5_TARGET_CT_7552TSN:
+       case SPX5_TARGET_CT_7556TSN:
+               if (sparx5->coreclock == SPX5_CORE_CLOCK_DEFAULT)
+                       freq = SPX5_CORE_CLOCK_625MHZ;
+               else if (sparx5->coreclock == SPX5_CORE_CLOCK_250MHZ)
+                       freq = 0; /* Not supported */
+               break;
+       default:
+               dev_err(sparx5->dev, "Target (%#04x) not supported\n",
+                       sparx5->target_ct);
+               return -ENODEV;
+       }
+
+       switch (freq) {
+       case SPX5_CORE_CLOCK_250MHZ:
+               clk_div = 10;
+               pol_upd_int = 312;
+               break;
+       case SPX5_CORE_CLOCK_500MHZ:
+               clk_div = 5;
+               pol_upd_int = 624;
+               break;
+       case SPX5_CORE_CLOCK_625MHZ:
+               clk_div = 4;
+               pol_upd_int = 780;
+               break;
+       default:
+               dev_err(sparx5->dev, "%d coreclock not supported on (%#04x)\n",
+                       sparx5->coreclock, sparx5->target_ct);
+               return -EINVAL;
+       }
+
+       /* Update state with chosen frequency */
+       sparx5->coreclock = freq;
+
+       /* Configure the LCPLL */
+       spx5_rmw(CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_DIV_SET(clk_div) |
+                CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_PRE_DIV_SET(0) |
+                CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_DIR_SET(0) |
+                CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_SEL_SET(0) |
+                CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_ENA_SET(0) |
+                CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_ENA_SET(1),
+                CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_DIV |
+                CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_PRE_DIV |
+                CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_DIR |
+                CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_SEL |
+                CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_ENA |
+                CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_ENA,
+                sparx5,
+                CLKGEN_LCPLL1_CORE_CLK_CFG);
+
+       clk_period = sparx5_clk_period(freq);
+
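+       /* sparx5_clk_period() returns the period in ps, e.g. 1600 ps at
+        * 625 MHz, programmed below in units of 100 ps (16).
+        */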
+       spx5_rmw(HSCH_SYS_CLK_PER_SYS_CLK_PER_100PS_SET(clk_period / 100),
+                HSCH_SYS_CLK_PER_SYS_CLK_PER_100PS,
+                sparx5,
+                HSCH_SYS_CLK_PER);
+
+       spx5_rmw(ANA_AC_POL_BDLB_DLB_CTRL_CLK_PERIOD_01NS_SET(clk_period / 100),
+                ANA_AC_POL_BDLB_DLB_CTRL_CLK_PERIOD_01NS,
+                sparx5,
+                ANA_AC_POL_BDLB_DLB_CTRL);
+
+       spx5_rmw(ANA_AC_POL_SLB_DLB_CTRL_CLK_PERIOD_01NS_SET(clk_period / 100),
+                ANA_AC_POL_SLB_DLB_CTRL_CLK_PERIOD_01NS,
+                sparx5,
+                ANA_AC_POL_SLB_DLB_CTRL);
+
+       spx5_rmw(LRN_AUTOAGE_CFG_1_CLK_PERIOD_01NS_SET(clk_period / 100),
+                LRN_AUTOAGE_CFG_1_CLK_PERIOD_01NS,
+                sparx5,
+                LRN_AUTOAGE_CFG_1);
+
+       for (idx = 0; idx < 3; idx++)
+               spx5_rmw(GCB_SIO_CLOCK_SYS_CLK_PERIOD_SET(clk_period / 100),
+                        GCB_SIO_CLOCK_SYS_CLK_PERIOD,
+                        sparx5,
+                        GCB_SIO_CLOCK(idx));
+
+       spx5_rmw(HSCH_TAS_STATEMACHINE_CFG_REVISIT_DLY_SET
+                ((256 * 1000) / clk_period),
+                HSCH_TAS_STATEMACHINE_CFG_REVISIT_DLY,
+                sparx5,
+                HSCH_TAS_STATEMACHINE_CFG);
+
+       spx5_rmw(ANA_AC_POL_POL_UPD_INT_CFG_POL_UPD_INT_SET(pol_upd_int),
+                ANA_AC_POL_POL_UPD_INT_CFG_POL_UPD_INT,
+                sparx5,
+                ANA_AC_POL_POL_UPD_INT_CFG);
+
+       return 0;
+}
+
+static int sparx5_qlim_set(struct sparx5 *sparx5)
+{
+       u32 res, dp, prio;
+
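+       /* Write a high watermark (0xFFF) to the shared per-priority
+        * (index 630+) and per-drop-precedence (index 638+) resources in
+        * both pools, so that limiting is left to the global watermarks
+        * configured below.
+        */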
+       for (res = 0; res < 2; res++) {
+               for (prio = 0; prio < 8; prio++)
+                       spx5_wr(0xFFF, sparx5,
+                               QRES_RES_CFG(prio + 630 + res * 1024));
+
+               for (dp = 0; dp < 4; dp++)
+                       spx5_wr(0xFFF, sparx5,
+                               QRES_RES_CFG(dp + 638 + res * 1024));
+       }
+
+       /* Set 80,90,95,100% of memory size for top watermarks */
+       spx5_wr(QLIM_WM(80), sparx5, XQS_QLIMIT_SHR_QLIM_CFG(0));
+       spx5_wr(QLIM_WM(90), sparx5, XQS_QLIMIT_SHR_CTOP_CFG(0));
+       spx5_wr(QLIM_WM(95), sparx5, XQS_QLIMIT_SHR_ATOP_CFG(0));
+       spx5_wr(QLIM_WM(100), sparx5, XQS_QLIMIT_SHR_TOP_CFG(0));
+
+       return 0;
+}
+
+/* Some boards need to map the SGPIO for signal detect explicitly to the
+ * port module
+ */
+static void sparx5_board_init(struct sparx5 *sparx5)
+{
+       int idx;
+
+       if (!sparx5->sd_sgpio_remapping)
+               return;
+
+       /* Enable SGPIO Signal Detect remapping */
+       spx5_rmw(GCB_HW_SGPIO_SD_CFG_SD_MAP_SEL,
+                GCB_HW_SGPIO_SD_CFG_SD_MAP_SEL,
+                sparx5,
+                GCB_HW_SGPIO_SD_CFG);
+
+       /* Refer to LOS SGPIO */
+       for (idx = 0; idx < SPX5_PORTS; idx++)
+               if (sparx5->ports[idx])
+                       if (sparx5->ports[idx]->conf.sd_sgpio != ~0)
+                               spx5_wr(sparx5->ports[idx]->conf.sd_sgpio,
+                                       sparx5,
+                                       GCB_HW_SGPIO_TO_SD_MAP_CFG(idx));
+}
+
+static int sparx5_start(struct sparx5 *sparx5)
+{
+       u8 broadcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+       char queue_name[32];
+       u32 idx;
+       int err;
+
+       /* Setup own UPSIDs */
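+       /* The 65 front ports span three 32-port units; each unit gets its
+        * own unit-port-set ID (UPSID 0..2) in every block that addresses
+        * ports as (upsid, port).
+        */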
+       for (idx = 0; idx < 3; idx++) {
+               spx5_wr(idx, sparx5, ANA_AC_OWN_UPSID(idx));
+               spx5_wr(idx, sparx5, ANA_CL_OWN_UPSID(idx));
+               spx5_wr(idx, sparx5, ANA_L2_OWN_UPSID(idx));
+               spx5_wr(idx, sparx5, REW_OWN_UPSID(idx));
+       }
+
+       /* Enable CPU ports */
+       for (idx = SPX5_PORTS; idx < SPX5_PORTS_ALL; idx++)
+               spx5_rmw(QFWD_SWITCH_PORT_MODE_PORT_ENA_SET(1),
+                        QFWD_SWITCH_PORT_MODE_PORT_ENA,
+                        sparx5,
+                        QFWD_SWITCH_PORT_MODE(idx));
+
+       /* Init masks */
+       sparx5_update_fwd(sparx5);
+
+       /* CPU copy CPU pgids */
+       spx5_wr(ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA_SET(1),
+               sparx5, ANA_AC_PGID_MISC_CFG(PGID_CPU));
+       spx5_wr(ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA_SET(1),
+               sparx5, ANA_AC_PGID_MISC_CFG(PGID_BCAST));
+
+       /* Recalc injected frame FCS */
+       for (idx = SPX5_PORT_CPU_0; idx <= SPX5_PORT_CPU_1; idx++)
+               spx5_rmw(ANA_CL_FILTER_CTRL_FORCE_FCS_UPDATE_ENA_SET(1),
+                        ANA_CL_FILTER_CTRL_FORCE_FCS_UPDATE_ENA,
+                        sparx5, ANA_CL_FILTER_CTRL(idx));
+
+       /* Init MAC table, ageing */
+       sparx5_mact_init(sparx5);
+
+       /* Setup VLANs */
+       sparx5_vlan_init(sparx5);
+
+       /* Add host mode BC address (points only to CPU) */
+       sparx5_mact_learn(sparx5, PGID_CPU, broadcast, NULL_VID);
+
+       /* Enable queue limitation watermarks */
+       sparx5_qlim_set(sparx5);
+
+       err = sparx5_config_auto_calendar(sparx5);
+       if (err)
+               return err;
+
+       err = sparx5_config_dsm_calendar(sparx5);
+       if (err)
+               return err;
+
+       /* Init stats */
+       err = sparx_stats_init(sparx5);
+       if (err)
+               return err;
+
+       /* Init mact_sw struct */
+       mutex_init(&sparx5->mact_lock);
+       INIT_LIST_HEAD(&sparx5->mact_entries);
+       snprintf(queue_name, sizeof(queue_name), "%s-mact",
+                dev_name(sparx5->dev));
+       sparx5->mact_queue = create_singlethread_workqueue(queue_name);
+       if (!sparx5->mact_queue)
+               return -ENOMEM;
+       INIT_DELAYED_WORK(&sparx5->mact_work, sparx5_mact_pull_work);
+       queue_delayed_work(sparx5->mact_queue, &sparx5->mact_work,
+                          SPX5_MACT_PULL_DELAY);
+
+       err = sparx5_register_netdevs(sparx5);
+       if (err)
+               return err;
+
+       sparx5_board_init(sparx5);
+       err = sparx5_register_notifier_blocks(sparx5);
+       if (err)
+               return err;
+
+       /* Start register based INJ/XTR */
+       err = -ENXIO;
+       if (sparx5->xtr_irq >= 0) {
+               err = devm_request_irq(sparx5->dev, sparx5->xtr_irq,
+                                      sparx5_xtr_handler, IRQF_SHARED,
+                                      "sparx5-xtr", sparx5);
+               if (!err)
+                       err = sparx5_manual_injection_mode(sparx5);
+               if (err)
+                       sparx5->xtr_irq = -ENXIO;
+       } else {
+               sparx5->xtr_irq = -ENXIO;
+       }
+       return err;
+}
+
+static void sparx5_cleanup_ports(struct sparx5 *sparx5)
+{
+       sparx5_unregister_netdevs(sparx5);
+       sparx5_destroy_netdevs(sparx5);
+}
+
+static int mchp_sparx5_probe(struct platform_device *pdev)
+{
+       struct initial_port_config *configs, *config;
+       struct device_node *np = pdev->dev.of_node;
+       struct device_node *ports, *portnp;
+       struct reset_control *reset;
+       struct sparx5 *sparx5;
+       int idx = 0, err = 0;
+       u8 mac_addr[ETH_ALEN];
+
+       if (!np && !pdev->dev.platform_data)
+               return -ENODEV;
+
+       sparx5 = devm_kzalloc(&pdev->dev, sizeof(*sparx5), GFP_KERNEL);
+       if (!sparx5)
+               return -ENOMEM;
+
+       platform_set_drvdata(pdev, sparx5);
+       sparx5->pdev = pdev;
+       sparx5->dev = &pdev->dev;
+
+       /* Do switch core reset if available */
+       reset = devm_reset_control_get_optional_shared(&pdev->dev, "switch");
+       if (IS_ERR(reset))
+               return dev_err_probe(&pdev->dev, PTR_ERR(reset),
+                                    "Failed to get switch reset controller.\n");
+       reset_control_reset(reset);
+
+       /* Default values, some from DT */
+       sparx5->coreclock = SPX5_CORE_CLOCK_DEFAULT;
+
+       ports = of_get_child_by_name(np, "ethernet-ports");
+       if (!ports) {
+               dev_err(sparx5->dev, "no ethernet-ports child node found\n");
+               return -ENODEV;
+       }
+       sparx5->port_count = of_get_child_count(ports);
+
+       configs = kcalloc(sparx5->port_count,
+                         sizeof(struct initial_port_config), GFP_KERNEL);
+       if (!configs) {
+               err = -ENOMEM;
+               goto cleanup_pnode;
+       }
+
+       for_each_available_child_of_node(ports, portnp) {
+               struct sparx5_port_config *conf;
+               struct phy *serdes;
+               u32 portno;
+
+               err = of_property_read_u32(portnp, "reg", &portno);
+               if (err) {
+                       dev_err(sparx5->dev, "port reg property error\n");
+                       continue;
+               }
+               config = &configs[idx];
+               conf = &config->conf;
+               conf->speed = SPEED_UNKNOWN;
+               conf->bandwidth = SPEED_UNKNOWN;
+               err = of_get_phy_mode(portnp, &conf->phy_mode);
+               if (err) {
+                       dev_err(sparx5->dev, "port %u: missing phy-mode\n",
+                               portno);
+                       continue;
+               }
+               err = of_property_read_u32(portnp, "microchip,bandwidth",
+                                          &conf->bandwidth);
+               if (err) {
+                       dev_err(sparx5->dev, "port %u: missing bandwidth\n",
+                               portno);
+                       continue;
+               }
+               err = of_property_read_u32(portnp, "microchip,sd-sgpio", &conf->sd_sgpio);
+               if (err)
+                       conf->sd_sgpio = ~0;
+               else
+                       sparx5->sd_sgpio_remapping = true;
+               serdes = devm_of_phy_get(sparx5->dev, portnp, NULL);
+               if (IS_ERR(serdes)) {
+                       err = dev_err_probe(sparx5->dev, PTR_ERR(serdes),
+                                           "port %u: missing serdes\n",
+                                           portno);
+                       of_node_put(portnp);
+                       goto cleanup_config;
+               }
+               config->portno = portno;
+               config->node = portnp;
+               config->serdes = serdes;
+
+               conf->media = PHY_MEDIA_DAC;
+               conf->serdes_reset = true;
+               conf->portmode = conf->phy_mode;
+               conf->power_down = true;
+               idx++;
+       }
+
+       err = sparx5_create_targets(sparx5);
+       if (err)
+               goto cleanup_config;
+
+       if (of_get_mac_address(np, mac_addr)) {
+               dev_info(sparx5->dev, "MAC addr was not set, use random MAC\n");
+               eth_random_addr(sparx5->base_mac);
+               sparx5->base_mac[5] = 0;
+       } else {
+               ether_addr_copy(sparx5->base_mac, mac_addr);
+       }
+
+       sparx5->xtr_irq = platform_get_irq_byname(sparx5->pdev, "xtr");
+
+       /* Read chip ID to check CPU interface */
+       sparx5->chip_id = spx5_rd(sparx5, GCB_CHIP_ID);
+
+       sparx5->target_ct = (enum spx5_target_chiptype)
+               GCB_CHIP_ID_PART_ID_GET(sparx5->chip_id);
+
+       /* Initialize Switchcore and internal RAMs */
+       err = sparx5_init_switchcore(sparx5);
+       if (err) {
+               dev_err(sparx5->dev, "Switchcore initialization error\n");
+               goto cleanup_config;
+       }
+
+       /* Initialize the LC-PLL (core clock) and set affected registers */
+       err = sparx5_init_coreclock(sparx5);
+       if (err) {
+               dev_err(sparx5->dev, "LC-PLL initialization error\n");
+               goto cleanup_config;
+       }
+
+       for (idx = 0; idx < sparx5->port_count; ++idx) {
+               config = &configs[idx];
+               if (!config->node)
+                       continue;
+
+               err = sparx5_create_port(sparx5, config);
+               if (err) {
+                       dev_err(sparx5->dev, "port create error\n");
+                       goto cleanup_ports;
+               }
+       }
+
+       err = sparx5_start(sparx5);
+       if (err) {
+               dev_err(sparx5->dev, "Start failed\n");
+               goto cleanup_ports;
+       }
+       goto cleanup_config;
+
+cleanup_ports:
+       sparx5_cleanup_ports(sparx5);
+cleanup_config:
+       kfree(configs);
+cleanup_pnode:
+       of_node_put(ports);
+       return err;
+}
+
+static int mchp_sparx5_remove(struct platform_device *pdev)
+{
+       struct sparx5 *sparx5 = platform_get_drvdata(pdev);
+
+       if (sparx5->xtr_irq >= 0) {
+               disable_irq(sparx5->xtr_irq);
+               sparx5->xtr_irq = -ENXIO;
+       }
+       sparx5_cleanup_ports(sparx5);
+       /* Unregister the netdevice and switchdev notifiers */
+       sparx5_unregister_notifier_blocks(sparx5);
+
+       return 0;
+}
+
+static const struct of_device_id mchp_sparx5_match[] = {
+       { .compatible = "microchip,sparx5-switch" },
+       { }
+};
+MODULE_DEVICE_TABLE(of, mchp_sparx5_match);
+
+static struct platform_driver mchp_sparx5_driver = {
+       .probe = mchp_sparx5_probe,
+       .remove = mchp_sparx5_remove,
+       .driver = {
+               .name = "sparx5-switch",
+               .of_match_table = mchp_sparx5_match,
+       },
+};
+
+module_platform_driver(mchp_sparx5_driver);
+
+MODULE_DESCRIPTION("Microchip Sparx5 switch driver");
+MODULE_AUTHOR("Steen Hegelund <steen.hegelund@microchip.com>");
+MODULE_LICENSE("Dual MIT/GPL");
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
new file mode 100644 (file)
index 0000000..4d5f44c
--- /dev/null
@@ -0,0 +1,375 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Microchip Sparx5 Switch driver
+ *
+ * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#ifndef __SPARX5_MAIN_H__
+#define __SPARX5_MAIN_H__
+
+#include <linux/types.h>
+#include <linux/phy/phy.h>
+#include <linux/netdevice.h>
+#include <linux/phy.h>
+#include <linux/if_vlan.h>
+#include <linux/bitmap.h>
+#include <linux/phylink.h>
+#include <linux/hrtimer.h>
+
+/* Target chip type */
+enum spx5_target_chiptype {
+       SPX5_TARGET_CT_7546    = 0x7546,  /* SparX-5-64  Enterprise */
+       SPX5_TARGET_CT_7549    = 0x7549,  /* SparX-5-90  Enterprise */
+       SPX5_TARGET_CT_7552    = 0x7552,  /* SparX-5-128 Enterprise */
+       SPX5_TARGET_CT_7556    = 0x7556,  /* SparX-5-160 Enterprise */
+       SPX5_TARGET_CT_7558    = 0x7558,  /* SparX-5-200 Enterprise */
+       SPX5_TARGET_CT_7546TSN = 0x47546, /* SparX-5-64i Industrial */
+       SPX5_TARGET_CT_7549TSN = 0x47549, /* SparX-5-90i Industrial */
+       SPX5_TARGET_CT_7552TSN = 0x47552, /* SparX-5-128i Industrial */
+       SPX5_TARGET_CT_7556TSN = 0x47556, /* SparX-5-160i Industrial */
+       SPX5_TARGET_CT_7558TSN = 0x47558, /* SparX-5-200i Industrial */
+};
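+
+/* Note: the industrial (TSN) variants encode the same 16-bit part id as
+ * their enterprise counterparts with an additional flag bit (0x40000)
+ * set, as reflected in the values above.
+ */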
+
+enum sparx5_port_max_tags {
+       SPX5_PORT_MAX_TAGS_NONE,  /* No extra tags allowed */
+       SPX5_PORT_MAX_TAGS_ONE,   /* Single tag allowed */
+       SPX5_PORT_MAX_TAGS_TWO    /* Single and double tags allowed */
+};
+
+enum sparx5_vlan_port_type {
+       SPX5_VLAN_PORT_TYPE_UNAWARE, /* VLAN unaware port */
+       SPX5_VLAN_PORT_TYPE_C,       /* C-port */
+       SPX5_VLAN_PORT_TYPE_S,       /* S-port */
+       SPX5_VLAN_PORT_TYPE_S_CUSTOM /* S-port using custom type */
+};
+
+#define SPX5_PORTS             65
+#define SPX5_PORT_CPU          (SPX5_PORTS)  /* Next port is CPU port */
+#define SPX5_PORT_CPU_0        (SPX5_PORT_CPU + 0) /* CPU Port 65 */
+#define SPX5_PORT_CPU_1        (SPX5_PORT_CPU + 1) /* CPU Port 66 */
+#define SPX5_PORT_VD0          (SPX5_PORT_CPU + 2) /* VD0/Port 67 used for IPMC */
+#define SPX5_PORT_VD1          (SPX5_PORT_CPU + 3) /* VD1/Port 68 used for AFI/OAM */
+#define SPX5_PORT_VD2          (SPX5_PORT_CPU + 4) /* VD2/Port 69 used for IPinIP */
+#define SPX5_PORTS_ALL         (SPX5_PORT_CPU + 5) /* Total number of ports */
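+
+/* Port numbering: 65 front ports (0-64), two CPU ports (65-66) and
+ * three virtual devices (67-69), so SPX5_PORTS_ALL is 70.
+ */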
+
+#define PGID_BASE              SPX5_PORTS /* Starts after port PGIDs */
+#define PGID_UC_FLOOD          (PGID_BASE + 0)
+#define PGID_MC_FLOOD          (PGID_BASE + 1)
+#define PGID_IPV4_MC_DATA      (PGID_BASE + 2)
+#define PGID_IPV4_MC_CTRL      (PGID_BASE + 3)
+#define PGID_IPV6_MC_DATA      (PGID_BASE + 4)
+#define PGID_IPV6_MC_CTRL      (PGID_BASE + 5)
+#define PGID_BCAST             (PGID_BASE + 6)
+#define PGID_CPU               (PGID_BASE + 7)
+
+#define IFH_LEN                9 /* 9 x u32 words = 36 bytes */
+#define NULL_VID               0
+#define SPX5_MACT_PULL_DELAY   (2 * HZ)
+#define SPX5_STATS_CHECK_DELAY (1 * HZ)
+#define SPX5_PRIOS             8     /* Number of priority queues */
+#define SPX5_BUFFER_CELL_SZ    184   /* Cell size */
+#define SPX5_BUFFER_MEMORY     4194280 /* 22795 words * 184 bytes */
+
+#define XTR_QUEUE     0
+#define INJ_QUEUE     0
+
+struct sparx5;
+
+struct sparx5_port_config {
+       phy_interface_t portmode;
+       u32 bandwidth;
+       int speed;
+       int duplex;
+       enum phy_media media;
+       bool inband;
+       bool power_down;
+       bool autoneg;
+       bool serdes_reset;
+       u32 pause;
+       u32 pause_adv;
+       phy_interface_t phy_mode;
+       u32 sd_sgpio;
+};
+
+struct sparx5_port {
+       struct net_device *ndev;
+       struct sparx5 *sparx5;
+       struct device_node *of_node;
+       struct phy *serdes;
+       struct sparx5_port_config conf;
+       struct phylink_config phylink_config;
+       struct phylink *phylink;
+       struct phylink_pcs phylink_pcs;
+       u16 portno;
+       /* Ingress default VLAN (pvid) */
+       u16 pvid;
+       /* Egress default VLAN (vid) */
+       u16 vid;
+       bool signd_internal;
+       bool signd_active_high;
+       bool signd_enable;
+       bool flow_control;
+       enum sparx5_port_max_tags max_vlan_tags;
+       enum sparx5_vlan_port_type vlan_type;
+       u32 custom_etype;
+       u32 ifh[IFH_LEN];
+       bool vlan_aware;
+       struct hrtimer inj_timer;
+};
+
+enum sparx5_core_clockfreq {
+       SPX5_CORE_CLOCK_DEFAULT,  /* Defaults to the highest supported frequency */
+       SPX5_CORE_CLOCK_250MHZ,   /* 250 MHz core clock frequency */
+       SPX5_CORE_CLOCK_500MHZ,   /* 500 MHz core clock frequency */
+       SPX5_CORE_CLOCK_625MHZ,   /* 625 MHz core clock frequency */
+};
+
+struct sparx5 {
+       struct platform_device *pdev;
+       struct device *dev;
+       u32 chip_id;
+       enum spx5_target_chiptype target_ct;
+       void __iomem *regs[NUM_TARGETS];
+       int port_count;
+       struct mutex lock; /* MAC reg lock */
+       /* port structures live in each net device's private data */
+       struct sparx5_port *ports[SPX5_PORTS];
+       enum sparx5_core_clockfreq coreclock;
+       /* Statistics */
+       u32 num_stats;
+       u32 num_ethtool_stats;
+       const char * const *stats_layout;
+       u64 *stats;
+       /* Workqueue for reading stats */
+       struct mutex queue_stats_lock;
+       struct delayed_work stats_work;
+       struct workqueue_struct *stats_queue;
+       /* Notifiers */
+       struct notifier_block netdevice_nb;
+       struct notifier_block switchdev_nb;
+       struct notifier_block switchdev_blocking_nb;
+       /* Switch state */
+       u8 base_mac[ETH_ALEN];
+       /* Associated bridge device (when bridged) */
+       struct net_device *hw_bridge_dev;
+       /* Bridged interfaces */
+       DECLARE_BITMAP(bridge_mask, SPX5_PORTS);
+       DECLARE_BITMAP(bridge_fwd_mask, SPX5_PORTS);
+       DECLARE_BITMAP(bridge_lrn_mask, SPX5_PORTS);
+       DECLARE_BITMAP(vlan_mask[VLAN_N_VID], SPX5_PORTS);
+       /* SW MAC table */
+       struct list_head mact_entries;
+       /* mac table list (mact_entries) mutex */
+       struct mutex mact_lock;
+       struct delayed_work mact_work;
+       struct workqueue_struct *mact_queue;
+       /* Board specifics */
+       bool sd_sgpio_remapping;
+       /* Register based inj/xtr */
+       int xtr_irq;
+};
+
+/* sparx5_switchdev.c */
+int sparx5_register_notifier_blocks(struct sparx5 *sparx5);
+void sparx5_unregister_notifier_blocks(struct sparx5 *sparx5);
+
+/* sparx5_packet.c */
+irqreturn_t sparx5_xtr_handler(int irq, void *_priv);
+int sparx5_port_xmit_impl(struct sk_buff *skb, struct net_device *dev);
+int sparx5_manual_injection_mode(struct sparx5 *sparx5);
+void sparx5_port_inj_timer_setup(struct sparx5_port *port);
+
+/* sparx5_mactable.c */
+void sparx5_mact_pull_work(struct work_struct *work);
+int sparx5_mact_learn(struct sparx5 *sparx5, int port,
+                     const unsigned char mac[ETH_ALEN], u16 vid);
+bool sparx5_mact_getnext(struct sparx5 *sparx5,
+                        unsigned char mac[ETH_ALEN], u16 *vid, u32 *pcfg2);
+int sparx5_mact_forget(struct sparx5 *sparx5,
+                      const unsigned char mac[ETH_ALEN], u16 vid);
+int sparx5_add_mact_entry(struct sparx5 *sparx5,
+                         struct sparx5_port *port,
+                         const unsigned char *addr, u16 vid);
+int sparx5_del_mact_entry(struct sparx5 *sparx5,
+                         const unsigned char *addr,
+                         u16 vid);
+int sparx5_mc_sync(struct net_device *dev, const unsigned char *addr);
+int sparx5_mc_unsync(struct net_device *dev, const unsigned char *addr);
+void sparx5_set_ageing(struct sparx5 *sparx5, int msecs);
+void sparx5_mact_init(struct sparx5 *sparx5);
+
+/* sparx5_vlan.c */
+void sparx5_pgid_update_mask(struct sparx5_port *port, int pgid, bool enable);
+void sparx5_update_fwd(struct sparx5 *sparx5);
+void sparx5_vlan_init(struct sparx5 *sparx5);
+void sparx5_vlan_port_setup(struct sparx5 *sparx5, int portno);
+int sparx5_vlan_vid_add(struct sparx5_port *port, u16 vid, bool pvid,
+                       bool untagged);
+int sparx5_vlan_vid_del(struct sparx5_port *port, u16 vid);
+void sparx5_vlan_port_apply(struct sparx5 *sparx5, struct sparx5_port *port);
+
+/* sparx5_calendar.c */
+int sparx5_config_auto_calendar(struct sparx5 *sparx5);
+int sparx5_config_dsm_calendar(struct sparx5 *sparx5);
+
+/* sparx5_ethtool.c */
+void sparx5_get_stats64(struct net_device *ndev, struct rtnl_link_stats64 *stats);
+int sparx_stats_init(struct sparx5 *sparx5);
+
+/* sparx5_netdev.c */
+bool sparx5_netdevice_check(const struct net_device *dev);
+struct net_device *sparx5_create_netdev(struct sparx5 *sparx5, u32 portno);
+int sparx5_register_netdevs(struct sparx5 *sparx5);
+void sparx5_destroy_netdevs(struct sparx5 *sparx5);
+void sparx5_unregister_netdevs(struct sparx5 *sparx5);
+
+/* Clock period in picoseconds */
+static inline u32 sparx5_clk_period(enum sparx5_core_clockfreq cclock)
+{
+       switch (cclock) {
+       case SPX5_CORE_CLOCK_250MHZ:
+               return 4000;
+       case SPX5_CORE_CLOCK_500MHZ:
+               return 2000;
+       case SPX5_CORE_CLOCK_625MHZ:
+       default:
+               return 1600;
+       }
+}
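+
+/* For example, a 250 MHz core clock gives 1e12 / 250e6 = 4000 ps per
+ * cycle, matching the value returned above; likewise 500 MHz -> 2000 ps
+ * and 625 MHz -> 1600 ps.
+ */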
+
+static inline bool sparx5_is_baser(phy_interface_t interface)
+{
+       return interface == PHY_INTERFACE_MODE_5GBASER ||
+              interface == PHY_INTERFACE_MODE_10GBASER ||
+              interface == PHY_INTERFACE_MODE_25GBASER;
+}
+
+extern const struct phylink_mac_ops sparx5_phylink_mac_ops;
+extern const struct phylink_pcs_ops sparx5_phylink_pcs_ops;
+extern const struct ethtool_ops sparx5_ethtool_ops;
+
+/* Calculate raw offset */
+static inline __pure int spx5_offset(int id, int tinst, int tcnt,
+                                    int gbase, int ginst,
+                                    int gcnt, int gwidth,
+                                    int raddr, int rinst,
+                                    int rcnt, int rwidth)
+{
+       WARN_ON((tinst) >= tcnt);
+       WARN_ON((ginst) >= gcnt);
+       WARN_ON((rinst) >= rcnt);
+       return gbase + ((ginst) * gwidth) +
+               raddr + ((rinst) * rwidth);
+}
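+
+/* Note that only the group and register coordinates contribute to the
+ * returned offset; the target instance is folded in by spx5_addr()
+ * below via the regs[] index, and the count arguments are used only
+ * for the bounds warnings.
+ */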
+
+/* Read, write and modify register contents.
+ * The register definition macros expand to the full coordinate list,
+ * starting at the target id.
+ */
+static inline void __iomem *spx5_addr(void __iomem *base[],
+                                     int id, int tinst, int tcnt,
+                                     int gbase, int ginst,
+                                     int gcnt, int gwidth,
+                                     int raddr, int rinst,
+                                     int rcnt, int rwidth)
+{
+       WARN_ON((tinst) >= tcnt);
+       WARN_ON((ginst) >= gcnt);
+       WARN_ON((rinst) >= rcnt);
+       return base[id + (tinst)] +
+               gbase + ((ginst) * gwidth) +
+               raddr + ((rinst) * rwidth);
+}
+
+static inline void __iomem *spx5_inst_addr(void __iomem *base,
+                                          int gbase, int ginst,
+                                          int gcnt, int gwidth,
+                                          int raddr, int rinst,
+                                          int rcnt, int rwidth)
+{
+       WARN_ON((ginst) >= gcnt);
+       WARN_ON((rinst) >= rcnt);
+       return base +
+               gbase + ((ginst) * gwidth) +
+               raddr + ((rinst) * rwidth);
+}
+
+static inline u32 spx5_rd(struct sparx5 *sparx5, int id, int tinst, int tcnt,
+                         int gbase, int ginst, int gcnt, int gwidth,
+                         int raddr, int rinst, int rcnt, int rwidth)
+{
+       return readl(spx5_addr(sparx5->regs, id, tinst, tcnt, gbase, ginst,
+                              gcnt, gwidth, raddr, rinst, rcnt, rwidth));
+}
+
+static inline u32 spx5_inst_rd(void __iomem *iomem, int id, int tinst, int tcnt,
+                              int gbase, int ginst, int gcnt, int gwidth,
+                              int raddr, int rinst, int rcnt, int rwidth)
+{
+       return readl(spx5_inst_addr(iomem, gbase, ginst, gcnt, gwidth,
+                                   raddr, rinst, rcnt, rwidth));
+}
+
+static inline void spx5_wr(u32 val, struct sparx5 *sparx5,
+                          int id, int tinst, int tcnt,
+                          int gbase, int ginst, int gcnt, int gwidth,
+                          int raddr, int rinst, int rcnt, int rwidth)
+{
+       writel(val, spx5_addr(sparx5->regs, id, tinst, tcnt,
+                             gbase, ginst, gcnt, gwidth,
+                             raddr, rinst, rcnt, rwidth));
+}
+
+static inline void spx5_inst_wr(u32 val, void __iomem *iomem,
+                               int id, int tinst, int tcnt,
+                               int gbase, int ginst, int gcnt, int gwidth,
+                               int raddr, int rinst, int rcnt, int rwidth)
+{
+       writel(val, spx5_inst_addr(iomem,
+                                  gbase, ginst, gcnt, gwidth,
+                                  raddr, rinst, rcnt, rwidth));
+}
+
+static inline void spx5_rmw(u32 val, u32 mask, struct sparx5 *sparx5,
+                           int id, int tinst, int tcnt,
+                           int gbase, int ginst, int gcnt, int gwidth,
+                           int raddr, int rinst, int rcnt, int rwidth)
+{
+       u32 nval;
+
+       nval = readl(spx5_addr(sparx5->regs, id, tinst, tcnt, gbase, ginst,
+                              gcnt, gwidth, raddr, rinst, rcnt, rwidth));
+       nval = (nval & ~mask) | (val & mask);
+       writel(nval, spx5_addr(sparx5->regs, id, tinst, tcnt, gbase, ginst,
+                              gcnt, gwidth, raddr, rinst, rcnt, rwidth));
+}
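+
+/* Illustrative use of the accessors above, with the coordinates
+ * supplied by the generated register macros in sparx5_main_regs.h
+ * (see __REG() there), e.g.:
+ *
+ *   chip_id = spx5_rd(sparx5, GCB_CHIP_ID);
+ *   spx5_rmw(ANA_L3_VLAN_CTRL_VLAN_ENA_SET(1),
+ *            ANA_L3_VLAN_CTRL_VLAN_ENA, sparx5, ANA_L3_VLAN_CTRL);
+ */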
+
+static inline void spx5_inst_rmw(u32 val, u32 mask, void __iomem *iomem,
+                                int id, int tinst, int tcnt,
+                                int gbase, int ginst, int gcnt, int gwidth,
+                                int raddr, int rinst, int rcnt, int rwidth)
+{
+       u32 nval;
+
+       nval = readl(spx5_inst_addr(iomem, gbase, ginst, gcnt, gwidth, raddr,
+                                   rinst, rcnt, rwidth));
+       nval = (nval & ~mask) | (val & mask);
+       writel(nval, spx5_inst_addr(iomem, gbase, ginst, gcnt, gwidth, raddr,
+                                   rinst, rcnt, rwidth));
+}
+
+static inline void __iomem *spx5_inst_get(struct sparx5 *sparx5, int id, int tinst)
+{
+       return sparx5->regs[id + tinst];
+}
+
+static inline void __iomem *spx5_reg_get(struct sparx5 *sparx5,
+                                        int id, int tinst, int tcnt,
+                                        int gbase, int ginst, int gcnt, int gwidth,
+                                        int raddr, int rinst, int rcnt, int rwidth)
+{
+       return spx5_addr(sparx5->regs, id, tinst, tcnt,
+                        gbase, ginst, gcnt, gwidth,
+                        raddr, rinst, rcnt, rwidth);
+}
+
+#endif /* __SPARX5_MAIN_H__ */
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h b/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h
new file mode 100644
index 0000000..5ab2373
--- /dev/null
@@ -0,0 +1,4642 @@
+/* SPDX-License-Identifier: GPL-2.0+
+ * Microchip Sparx5 Switch driver
+ *
+ * Copyright (c) 2021 Microchip Technology Inc.
+ */
+
+/* This file is autogenerated by cml-utils 2021-05-06 13:06:37 +0200.
+ * Commit ID: 9ae4ec441e25e4b9003f4e514df5cb12a36b84d3
+ */
+
+#ifndef _SPARX5_MAIN_REGS_H_
+#define _SPARX5_MAIN_REGS_H_
+
+#include <linux/bitfield.h>
+#include <linux/types.h>
+#include <linux/bug.h>
+
+enum sparx5_target {
+       TARGET_ANA_AC = 1,
+       TARGET_ANA_ACL = 2,
+       TARGET_ANA_AC_POL = 4,
+       TARGET_ANA_CL = 6,
+       TARGET_ANA_L2 = 7,
+       TARGET_ANA_L3 = 8,
+       TARGET_ASM = 9,
+       TARGET_CLKGEN = 11,
+       TARGET_CPU = 12,
+       TARGET_DEV10G = 17,
+       TARGET_DEV25G = 29,
+       TARGET_DEV2G5 = 37,
+       TARGET_DEV5G = 102,
+       TARGET_DSM = 115,
+       TARGET_EACL = 116,
+       TARGET_FDMA = 117,
+       TARGET_GCB = 118,
+       TARGET_HSCH = 119,
+       TARGET_LRN = 122,
+       TARGET_PCEP = 129,
+       TARGET_PCS10G_BR = 132,
+       TARGET_PCS25G_BR = 144,
+       TARGET_PCS5G_BR = 160,
+       TARGET_PORT_CONF = 173,
+       TARGET_QFWD = 175,
+       TARGET_QRES = 176,
+       TARGET_QS = 177,
+       TARGET_QSYS = 178,
+       TARGET_REW = 179,
+       TARGET_VCAP_SUPER = 326,
+       TARGET_VOP = 327,
+       TARGET_XQS = 331,
+       NUM_TARGETS = 332
+};
+
+#define __REG(...)    __VA_ARGS__
+
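+/* __REG() forwards its arguments unchanged, so each register macro
+ * below expands to the coordinate list (target id, target instance and
+ * count, group base/instance/count/width, register
+ * address/instance/count/width) that the spx5_rd()/spx5_wr()/
+ * spx5_rmw() accessors in sparx5_main.h expect.
+ */
+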
+/*      ANA_AC:RAM_CTRL:RAM_INIT */
+#define ANA_AC_RAM_INIT           __REG(TARGET_ANA_AC, 0, 1, 839108, 0, 1, 4, 0, 0, 1, 4)
+
+#define ANA_AC_RAM_INIT_RAM_INIT                 BIT(1)
+#define ANA_AC_RAM_INIT_RAM_INIT_SET(x)\
+       FIELD_PREP(ANA_AC_RAM_INIT_RAM_INIT, x)
+#define ANA_AC_RAM_INIT_RAM_INIT_GET(x)\
+       FIELD_GET(ANA_AC_RAM_INIT_RAM_INIT, x)
+
+#define ANA_AC_RAM_INIT_RAM_CFG_HOOK             BIT(0)
+#define ANA_AC_RAM_INIT_RAM_CFG_HOOK_SET(x)\
+       FIELD_PREP(ANA_AC_RAM_INIT_RAM_CFG_HOOK, x)
+#define ANA_AC_RAM_INIT_RAM_CFG_HOOK_GET(x)\
+       FIELD_GET(ANA_AC_RAM_INIT_RAM_CFG_HOOK, x)
+
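+/* The generated _SET()/_GET() helpers are thin wrappers around
+ * FIELD_PREP()/FIELD_GET() from <linux/bitfield.h>; for the one-bit
+ * field above, ANA_AC_RAM_INIT_RAM_INIT_SET(1) yields 0x2 and
+ * ANA_AC_RAM_INIT_RAM_INIT_GET(0x2) yields 1.
+ */
+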
+/*      ANA_AC:PS_COMMON:OWN_UPSID */
+#define ANA_AC_OWN_UPSID(r)       __REG(TARGET_ANA_AC, 0, 1, 894472, 0, 1, 352, 52, r, 3, 4)
+
+#define ANA_AC_OWN_UPSID_OWN_UPSID               GENMASK(4, 0)
+#define ANA_AC_OWN_UPSID_OWN_UPSID_SET(x)\
+       FIELD_PREP(ANA_AC_OWN_UPSID_OWN_UPSID, x)
+#define ANA_AC_OWN_UPSID_OWN_UPSID_GET(x)\
+       FIELD_GET(ANA_AC_OWN_UPSID_OWN_UPSID, x)
+
+/*      ANA_AC:SRC:SRC_CFG */
+#define ANA_AC_SRC_CFG(g)         __REG(TARGET_ANA_AC, 0, 1, 849920, g, 102, 16, 0, 0, 1, 4)
+
+/*      ANA_AC:SRC:SRC_CFG1 */
+#define ANA_AC_SRC_CFG1(g)        __REG(TARGET_ANA_AC, 0, 1, 849920, g, 102, 16, 4, 0, 1, 4)
+
+/*      ANA_AC:SRC:SRC_CFG2 */
+#define ANA_AC_SRC_CFG2(g)        __REG(TARGET_ANA_AC, 0, 1, 849920, g, 102, 16, 8, 0, 1, 4)
+
+#define ANA_AC_SRC_CFG2_PORT_MASK2               BIT(0)
+#define ANA_AC_SRC_CFG2_PORT_MASK2_SET(x)\
+       FIELD_PREP(ANA_AC_SRC_CFG2_PORT_MASK2, x)
+#define ANA_AC_SRC_CFG2_PORT_MASK2_GET(x)\
+       FIELD_GET(ANA_AC_SRC_CFG2_PORT_MASK2, x)
+
+/*      ANA_AC:PGID:PGID_CFG */
+#define ANA_AC_PGID_CFG(g)        __REG(TARGET_ANA_AC, 0, 1, 786432, g, 3290, 16, 0, 0, 1, 4)
+
+/*      ANA_AC:PGID:PGID_CFG1 */
+#define ANA_AC_PGID_CFG1(g)       __REG(TARGET_ANA_AC, 0, 1, 786432, g, 3290, 16, 4, 0, 1, 4)
+
+/*      ANA_AC:PGID:PGID_CFG2 */
+#define ANA_AC_PGID_CFG2(g)       __REG(TARGET_ANA_AC, 0, 1, 786432, g, 3290, 16, 8, 0, 1, 4)
+
+#define ANA_AC_PGID_CFG2_PORT_MASK2              BIT(0)
+#define ANA_AC_PGID_CFG2_PORT_MASK2_SET(x)\
+       FIELD_PREP(ANA_AC_PGID_CFG2_PORT_MASK2, x)
+#define ANA_AC_PGID_CFG2_PORT_MASK2_GET(x)\
+       FIELD_GET(ANA_AC_PGID_CFG2_PORT_MASK2, x)
+
+/*      ANA_AC:PGID:PGID_MISC_CFG */
+#define ANA_AC_PGID_MISC_CFG(g)   __REG(TARGET_ANA_AC, 0, 1, 786432, g, 3290, 16, 12, 0, 1, 4)
+
+#define ANA_AC_PGID_MISC_CFG_PGID_CPU_QU         GENMASK(6, 4)
+#define ANA_AC_PGID_MISC_CFG_PGID_CPU_QU_SET(x)\
+       FIELD_PREP(ANA_AC_PGID_MISC_CFG_PGID_CPU_QU, x)
+#define ANA_AC_PGID_MISC_CFG_PGID_CPU_QU_GET(x)\
+       FIELD_GET(ANA_AC_PGID_MISC_CFG_PGID_CPU_QU, x)
+
+#define ANA_AC_PGID_MISC_CFG_STACK_TYPE_ENA      BIT(1)
+#define ANA_AC_PGID_MISC_CFG_STACK_TYPE_ENA_SET(x)\
+       FIELD_PREP(ANA_AC_PGID_MISC_CFG_STACK_TYPE_ENA, x)
+#define ANA_AC_PGID_MISC_CFG_STACK_TYPE_ENA_GET(x)\
+       FIELD_GET(ANA_AC_PGID_MISC_CFG_STACK_TYPE_ENA, x)
+
+#define ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA   BIT(0)
+#define ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA_SET(x)\
+       FIELD_PREP(ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA, x)
+#define ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA_GET(x)\
+       FIELD_GET(ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA, x)
+
+/*      ANA_AC:STAT_GLOBAL_CFG_PORT:STAT_GLOBAL_EVENT_MASK */
+#define ANA_AC_PORT_SGE_CFG(r)    __REG(TARGET_ANA_AC, 0, 1, 851552, 0, 1, 20, 0, r, 4, 4)
+
+#define ANA_AC_PORT_SGE_CFG_MASK                 GENMASK(15, 0)
+#define ANA_AC_PORT_SGE_CFG_MASK_SET(x)\
+       FIELD_PREP(ANA_AC_PORT_SGE_CFG_MASK, x)
+#define ANA_AC_PORT_SGE_CFG_MASK_GET(x)\
+       FIELD_GET(ANA_AC_PORT_SGE_CFG_MASK, x)
+
+/*      ANA_AC:STAT_GLOBAL_CFG_PORT:STAT_RESET */
+#define ANA_AC_STAT_RESET         __REG(TARGET_ANA_AC, 0, 1, 851552, 0, 1, 20, 16, 0, 1, 4)
+
+#define ANA_AC_STAT_RESET_RESET                  BIT(0)
+#define ANA_AC_STAT_RESET_RESET_SET(x)\
+       FIELD_PREP(ANA_AC_STAT_RESET_RESET, x)
+#define ANA_AC_STAT_RESET_RESET_GET(x)\
+       FIELD_GET(ANA_AC_STAT_RESET_RESET, x)
+
+/*      ANA_AC:STAT_CNT_CFG_PORT:STAT_CFG */
+#define ANA_AC_PORT_STAT_CFG(g, r) __REG(TARGET_ANA_AC, 0, 1, 843776, g, 70, 64, 4, r, 4, 4)
+
+#define ANA_AC_PORT_STAT_CFG_CFG_PRIO_MASK       GENMASK(11, 4)
+#define ANA_AC_PORT_STAT_CFG_CFG_PRIO_MASK_SET(x)\
+       FIELD_PREP(ANA_AC_PORT_STAT_CFG_CFG_PRIO_MASK, x)
+#define ANA_AC_PORT_STAT_CFG_CFG_PRIO_MASK_GET(x)\
+       FIELD_GET(ANA_AC_PORT_STAT_CFG_CFG_PRIO_MASK, x)
+
+#define ANA_AC_PORT_STAT_CFG_CFG_CNT_FRM_TYPE    GENMASK(3, 1)
+#define ANA_AC_PORT_STAT_CFG_CFG_CNT_FRM_TYPE_SET(x)\
+       FIELD_PREP(ANA_AC_PORT_STAT_CFG_CFG_CNT_FRM_TYPE, x)
+#define ANA_AC_PORT_STAT_CFG_CFG_CNT_FRM_TYPE_GET(x)\
+       FIELD_GET(ANA_AC_PORT_STAT_CFG_CFG_CNT_FRM_TYPE, x)
+
+#define ANA_AC_PORT_STAT_CFG_CFG_CNT_BYTE        BIT(0)
+#define ANA_AC_PORT_STAT_CFG_CFG_CNT_BYTE_SET(x)\
+       FIELD_PREP(ANA_AC_PORT_STAT_CFG_CFG_CNT_BYTE, x)
+#define ANA_AC_PORT_STAT_CFG_CFG_CNT_BYTE_GET(x)\
+       FIELD_GET(ANA_AC_PORT_STAT_CFG_CFG_CNT_BYTE, x)
+
+/*      ANA_AC:STAT_CNT_CFG_PORT:STAT_LSB_CNT */
+#define ANA_AC_PORT_STAT_LSB_CNT(g, r) __REG(TARGET_ANA_AC, 0, 1, 843776, g, 70, 64, 20, r, 4, 4)
+
+/*      ANA_ACL:COMMON:OWN_UPSID */
+#define ANA_ACL_OWN_UPSID(r)      __REG(TARGET_ANA_ACL, 0, 1, 32768, 0, 1, 592, 580, r, 3, 4)
+
+#define ANA_ACL_OWN_UPSID_OWN_UPSID              GENMASK(4, 0)
+#define ANA_ACL_OWN_UPSID_OWN_UPSID_SET(x)\
+       FIELD_PREP(ANA_ACL_OWN_UPSID_OWN_UPSID, x)
+#define ANA_ACL_OWN_UPSID_OWN_UPSID_GET(x)\
+       FIELD_GET(ANA_ACL_OWN_UPSID_OWN_UPSID, x)
+
+/*      ANA_AC_POL:POL_ALL_CFG:POL_UPD_INT_CFG */
+#define ANA_AC_POL_POL_UPD_INT_CFG __REG(TARGET_ANA_AC_POL, 0, 1, 75968, 0, 1, 1160, 1148, 0, 1, 4)
+
+#define ANA_AC_POL_POL_UPD_INT_CFG_POL_UPD_INT   GENMASK(9, 0)
+#define ANA_AC_POL_POL_UPD_INT_CFG_POL_UPD_INT_SET(x)\
+       FIELD_PREP(ANA_AC_POL_POL_UPD_INT_CFG_POL_UPD_INT, x)
+#define ANA_AC_POL_POL_UPD_INT_CFG_POL_UPD_INT_GET(x)\
+       FIELD_GET(ANA_AC_POL_POL_UPD_INT_CFG_POL_UPD_INT, x)
+
+/*      ANA_AC_POL:COMMON_BDLB:DLB_CTRL */
+#define ANA_AC_POL_BDLB_DLB_CTRL  __REG(TARGET_ANA_AC_POL, 0, 1, 79048, 0, 1, 8, 0, 0, 1, 4)
+
+#define ANA_AC_POL_BDLB_DLB_CTRL_CLK_PERIOD_01NS GENMASK(26, 19)
+#define ANA_AC_POL_BDLB_DLB_CTRL_CLK_PERIOD_01NS_SET(x)\
+       FIELD_PREP(ANA_AC_POL_BDLB_DLB_CTRL_CLK_PERIOD_01NS, x)
+#define ANA_AC_POL_BDLB_DLB_CTRL_CLK_PERIOD_01NS_GET(x)\
+       FIELD_GET(ANA_AC_POL_BDLB_DLB_CTRL_CLK_PERIOD_01NS, x)
+
+#define ANA_AC_POL_BDLB_DLB_CTRL_BASE_TICK_CNT   GENMASK(18, 4)
+#define ANA_AC_POL_BDLB_DLB_CTRL_BASE_TICK_CNT_SET(x)\
+       FIELD_PREP(ANA_AC_POL_BDLB_DLB_CTRL_BASE_TICK_CNT, x)
+#define ANA_AC_POL_BDLB_DLB_CTRL_BASE_TICK_CNT_GET(x)\
+       FIELD_GET(ANA_AC_POL_BDLB_DLB_CTRL_BASE_TICK_CNT, x)
+
+#define ANA_AC_POL_BDLB_DLB_CTRL_LEAK_ENA        BIT(1)
+#define ANA_AC_POL_BDLB_DLB_CTRL_LEAK_ENA_SET(x)\
+       FIELD_PREP(ANA_AC_POL_BDLB_DLB_CTRL_LEAK_ENA, x)
+#define ANA_AC_POL_BDLB_DLB_CTRL_LEAK_ENA_GET(x)\
+       FIELD_GET(ANA_AC_POL_BDLB_DLB_CTRL_LEAK_ENA, x)
+
+#define ANA_AC_POL_BDLB_DLB_CTRL_DLB_ADD_ENA     BIT(0)
+#define ANA_AC_POL_BDLB_DLB_CTRL_DLB_ADD_ENA_SET(x)\
+       FIELD_PREP(ANA_AC_POL_BDLB_DLB_CTRL_DLB_ADD_ENA, x)
+#define ANA_AC_POL_BDLB_DLB_CTRL_DLB_ADD_ENA_GET(x)\
+       FIELD_GET(ANA_AC_POL_BDLB_DLB_CTRL_DLB_ADD_ENA, x)
+
+/*      ANA_AC_POL:COMMON_BUM_SLB:DLB_CTRL */
+#define ANA_AC_POL_SLB_DLB_CTRL   __REG(TARGET_ANA_AC_POL, 0, 1, 79056, 0, 1, 20, 0, 0, 1, 4)
+
+#define ANA_AC_POL_SLB_DLB_CTRL_CLK_PERIOD_01NS  GENMASK(26, 19)
+#define ANA_AC_POL_SLB_DLB_CTRL_CLK_PERIOD_01NS_SET(x)\
+       FIELD_PREP(ANA_AC_POL_SLB_DLB_CTRL_CLK_PERIOD_01NS, x)
+#define ANA_AC_POL_SLB_DLB_CTRL_CLK_PERIOD_01NS_GET(x)\
+       FIELD_GET(ANA_AC_POL_SLB_DLB_CTRL_CLK_PERIOD_01NS, x)
+
+#define ANA_AC_POL_SLB_DLB_CTRL_BASE_TICK_CNT    GENMASK(18, 4)
+#define ANA_AC_POL_SLB_DLB_CTRL_BASE_TICK_CNT_SET(x)\
+       FIELD_PREP(ANA_AC_POL_SLB_DLB_CTRL_BASE_TICK_CNT, x)
+#define ANA_AC_POL_SLB_DLB_CTRL_BASE_TICK_CNT_GET(x)\
+       FIELD_GET(ANA_AC_POL_SLB_DLB_CTRL_BASE_TICK_CNT, x)
+
+#define ANA_AC_POL_SLB_DLB_CTRL_LEAK_ENA         BIT(1)
+#define ANA_AC_POL_SLB_DLB_CTRL_LEAK_ENA_SET(x)\
+       FIELD_PREP(ANA_AC_POL_SLB_DLB_CTRL_LEAK_ENA, x)
+#define ANA_AC_POL_SLB_DLB_CTRL_LEAK_ENA_GET(x)\
+       FIELD_GET(ANA_AC_POL_SLB_DLB_CTRL_LEAK_ENA, x)
+
+#define ANA_AC_POL_SLB_DLB_CTRL_DLB_ADD_ENA      BIT(0)
+#define ANA_AC_POL_SLB_DLB_CTRL_DLB_ADD_ENA_SET(x)\
+       FIELD_PREP(ANA_AC_POL_SLB_DLB_CTRL_DLB_ADD_ENA, x)
+#define ANA_AC_POL_SLB_DLB_CTRL_DLB_ADD_ENA_GET(x)\
+       FIELD_GET(ANA_AC_POL_SLB_DLB_CTRL_DLB_ADD_ENA, x)
+
+/*      ANA_CL:PORT:FILTER_CTRL */
+#define ANA_CL_FILTER_CTRL(g)     __REG(TARGET_ANA_CL, 0, 1, 131072, g, 70, 512, 4, 0, 1, 4)
+
+#define ANA_CL_FILTER_CTRL_FILTER_SMAC_MC_DIS    BIT(2)
+#define ANA_CL_FILTER_CTRL_FILTER_SMAC_MC_DIS_SET(x)\
+       FIELD_PREP(ANA_CL_FILTER_CTRL_FILTER_SMAC_MC_DIS, x)
+#define ANA_CL_FILTER_CTRL_FILTER_SMAC_MC_DIS_GET(x)\
+       FIELD_GET(ANA_CL_FILTER_CTRL_FILTER_SMAC_MC_DIS, x)
+
+#define ANA_CL_FILTER_CTRL_FILTER_NULL_MAC_DIS   BIT(1)
+#define ANA_CL_FILTER_CTRL_FILTER_NULL_MAC_DIS_SET(x)\
+       FIELD_PREP(ANA_CL_FILTER_CTRL_FILTER_NULL_MAC_DIS, x)
+#define ANA_CL_FILTER_CTRL_FILTER_NULL_MAC_DIS_GET(x)\
+       FIELD_GET(ANA_CL_FILTER_CTRL_FILTER_NULL_MAC_DIS, x)
+
+#define ANA_CL_FILTER_CTRL_FORCE_FCS_UPDATE_ENA  BIT(0)
+#define ANA_CL_FILTER_CTRL_FORCE_FCS_UPDATE_ENA_SET(x)\
+       FIELD_PREP(ANA_CL_FILTER_CTRL_FORCE_FCS_UPDATE_ENA, x)
+#define ANA_CL_FILTER_CTRL_FORCE_FCS_UPDATE_ENA_GET(x)\
+       FIELD_GET(ANA_CL_FILTER_CTRL_FORCE_FCS_UPDATE_ENA, x)
+
+/*      ANA_CL:PORT:VLAN_FILTER_CTRL */
+#define ANA_CL_VLAN_FILTER_CTRL(g, r) __REG(TARGET_ANA_CL, 0, 1, 131072, g, 70, 512, 8, r, 3, 4)
+
+#define ANA_CL_VLAN_FILTER_CTRL_TAG_REQUIRED_ENA BIT(10)
+#define ANA_CL_VLAN_FILTER_CTRL_TAG_REQUIRED_ENA_SET(x)\
+       FIELD_PREP(ANA_CL_VLAN_FILTER_CTRL_TAG_REQUIRED_ENA, x)
+#define ANA_CL_VLAN_FILTER_CTRL_TAG_REQUIRED_ENA_GET(x)\
+       FIELD_GET(ANA_CL_VLAN_FILTER_CTRL_TAG_REQUIRED_ENA, x)
+
+#define ANA_CL_VLAN_FILTER_CTRL_PRIO_CTAG_DIS    BIT(9)
+#define ANA_CL_VLAN_FILTER_CTRL_PRIO_CTAG_DIS_SET(x)\
+       FIELD_PREP(ANA_CL_VLAN_FILTER_CTRL_PRIO_CTAG_DIS, x)
+#define ANA_CL_VLAN_FILTER_CTRL_PRIO_CTAG_DIS_GET(x)\
+       FIELD_GET(ANA_CL_VLAN_FILTER_CTRL_PRIO_CTAG_DIS, x)
+
+#define ANA_CL_VLAN_FILTER_CTRL_CTAG_DIS         BIT(8)
+#define ANA_CL_VLAN_FILTER_CTRL_CTAG_DIS_SET(x)\
+       FIELD_PREP(ANA_CL_VLAN_FILTER_CTRL_CTAG_DIS, x)
+#define ANA_CL_VLAN_FILTER_CTRL_CTAG_DIS_GET(x)\
+       FIELD_GET(ANA_CL_VLAN_FILTER_CTRL_CTAG_DIS, x)
+
+#define ANA_CL_VLAN_FILTER_CTRL_PRIO_STAG_DIS    BIT(7)
+#define ANA_CL_VLAN_FILTER_CTRL_PRIO_STAG_DIS_SET(x)\
+       FIELD_PREP(ANA_CL_VLAN_FILTER_CTRL_PRIO_STAG_DIS, x)
+#define ANA_CL_VLAN_FILTER_CTRL_PRIO_STAG_DIS_GET(x)\
+       FIELD_GET(ANA_CL_VLAN_FILTER_CTRL_PRIO_STAG_DIS, x)
+
+#define ANA_CL_VLAN_FILTER_CTRL_PRIO_CUST1_STAG_DIS BIT(6)
+#define ANA_CL_VLAN_FILTER_CTRL_PRIO_CUST1_STAG_DIS_SET(x)\
+       FIELD_PREP(ANA_CL_VLAN_FILTER_CTRL_PRIO_CUST1_STAG_DIS, x)
+#define ANA_CL_VLAN_FILTER_CTRL_PRIO_CUST1_STAG_DIS_GET(x)\
+       FIELD_GET(ANA_CL_VLAN_FILTER_CTRL_PRIO_CUST1_STAG_DIS, x)
+
+#define ANA_CL_VLAN_FILTER_CTRL_PRIO_CUST2_STAG_DIS BIT(5)
+#define ANA_CL_VLAN_FILTER_CTRL_PRIO_CUST2_STAG_DIS_SET(x)\
+       FIELD_PREP(ANA_CL_VLAN_FILTER_CTRL_PRIO_CUST2_STAG_DIS, x)
+#define ANA_CL_VLAN_FILTER_CTRL_PRIO_CUST2_STAG_DIS_GET(x)\
+       FIELD_GET(ANA_CL_VLAN_FILTER_CTRL_PRIO_CUST2_STAG_DIS, x)
+
+#define ANA_CL_VLAN_FILTER_CTRL_PRIO_CUST3_STAG_DIS BIT(4)
+#define ANA_CL_VLAN_FILTER_CTRL_PRIO_CUST3_STAG_DIS_SET(x)\
+       FIELD_PREP(ANA_CL_VLAN_FILTER_CTRL_PRIO_CUST3_STAG_DIS, x)
+#define ANA_CL_VLAN_FILTER_CTRL_PRIO_CUST3_STAG_DIS_GET(x)\
+       FIELD_GET(ANA_CL_VLAN_FILTER_CTRL_PRIO_CUST3_STAG_DIS, x)
+
+#define ANA_CL_VLAN_FILTER_CTRL_STAG_DIS         BIT(3)
+#define ANA_CL_VLAN_FILTER_CTRL_STAG_DIS_SET(x)\
+       FIELD_PREP(ANA_CL_VLAN_FILTER_CTRL_STAG_DIS, x)
+#define ANA_CL_VLAN_FILTER_CTRL_STAG_DIS_GET(x)\
+       FIELD_GET(ANA_CL_VLAN_FILTER_CTRL_STAG_DIS, x)
+
+#define ANA_CL_VLAN_FILTER_CTRL_CUST1_STAG_DIS   BIT(2)
+#define ANA_CL_VLAN_FILTER_CTRL_CUST1_STAG_DIS_SET(x)\
+       FIELD_PREP(ANA_CL_VLAN_FILTER_CTRL_CUST1_STAG_DIS, x)
+#define ANA_CL_VLAN_FILTER_CTRL_CUST1_STAG_DIS_GET(x)\
+       FIELD_GET(ANA_CL_VLAN_FILTER_CTRL_CUST1_STAG_DIS, x)
+
+#define ANA_CL_VLAN_FILTER_CTRL_CUST2_STAG_DIS   BIT(1)
+#define ANA_CL_VLAN_FILTER_CTRL_CUST2_STAG_DIS_SET(x)\
+       FIELD_PREP(ANA_CL_VLAN_FILTER_CTRL_CUST2_STAG_DIS, x)
+#define ANA_CL_VLAN_FILTER_CTRL_CUST2_STAG_DIS_GET(x)\
+       FIELD_GET(ANA_CL_VLAN_FILTER_CTRL_CUST2_STAG_DIS, x)
+
+#define ANA_CL_VLAN_FILTER_CTRL_CUST3_STAG_DIS   BIT(0)
+#define ANA_CL_VLAN_FILTER_CTRL_CUST3_STAG_DIS_SET(x)\
+       FIELD_PREP(ANA_CL_VLAN_FILTER_CTRL_CUST3_STAG_DIS, x)
+#define ANA_CL_VLAN_FILTER_CTRL_CUST3_STAG_DIS_GET(x)\
+       FIELD_GET(ANA_CL_VLAN_FILTER_CTRL_CUST3_STAG_DIS, x)
+
+/*      ANA_CL:PORT:ETAG_FILTER_CTRL */
+#define ANA_CL_ETAG_FILTER_CTRL(g) __REG(TARGET_ANA_CL, 0, 1, 131072, g, 70, 512, 20, 0, 1, 4)
+
+#define ANA_CL_ETAG_FILTER_CTRL_ETAG_REQUIRED_ENA BIT(1)
+#define ANA_CL_ETAG_FILTER_CTRL_ETAG_REQUIRED_ENA_SET(x)\
+       FIELD_PREP(ANA_CL_ETAG_FILTER_CTRL_ETAG_REQUIRED_ENA, x)
+#define ANA_CL_ETAG_FILTER_CTRL_ETAG_REQUIRED_ENA_GET(x)\
+       FIELD_GET(ANA_CL_ETAG_FILTER_CTRL_ETAG_REQUIRED_ENA, x)
+
+#define ANA_CL_ETAG_FILTER_CTRL_ETAG_DIS         BIT(0)
+#define ANA_CL_ETAG_FILTER_CTRL_ETAG_DIS_SET(x)\
+       FIELD_PREP(ANA_CL_ETAG_FILTER_CTRL_ETAG_DIS, x)
+#define ANA_CL_ETAG_FILTER_CTRL_ETAG_DIS_GET(x)\
+       FIELD_GET(ANA_CL_ETAG_FILTER_CTRL_ETAG_DIS, x)
+
+/*      ANA_CL:PORT:VLAN_CTRL */
+#define ANA_CL_VLAN_CTRL(g)       __REG(TARGET_ANA_CL, 0, 1, 131072, g, 70, 512, 32, 0, 1, 4)
+
+#define ANA_CL_VLAN_CTRL_PORT_VOE_TPID_AWARE_DIS GENMASK(30, 26)
+#define ANA_CL_VLAN_CTRL_PORT_VOE_TPID_AWARE_DIS_SET(x)\
+       FIELD_PREP(ANA_CL_VLAN_CTRL_PORT_VOE_TPID_AWARE_DIS, x)
+#define ANA_CL_VLAN_CTRL_PORT_VOE_TPID_AWARE_DIS_GET(x)\
+       FIELD_GET(ANA_CL_VLAN_CTRL_PORT_VOE_TPID_AWARE_DIS, x)
+
+#define ANA_CL_VLAN_CTRL_PORT_VOE_DEFAULT_PCP    GENMASK(25, 23)
+#define ANA_CL_VLAN_CTRL_PORT_VOE_DEFAULT_PCP_SET(x)\
+       FIELD_PREP(ANA_CL_VLAN_CTRL_PORT_VOE_DEFAULT_PCP, x)
+#define ANA_CL_VLAN_CTRL_PORT_VOE_DEFAULT_PCP_GET(x)\
+       FIELD_GET(ANA_CL_VLAN_CTRL_PORT_VOE_DEFAULT_PCP, x)
+
+#define ANA_CL_VLAN_CTRL_PORT_VOE_DEFAULT_DEI    BIT(22)
+#define ANA_CL_VLAN_CTRL_PORT_VOE_DEFAULT_DEI_SET(x)\
+       FIELD_PREP(ANA_CL_VLAN_CTRL_PORT_VOE_DEFAULT_DEI, x)
+#define ANA_CL_VLAN_CTRL_PORT_VOE_DEFAULT_DEI_GET(x)\
+       FIELD_GET(ANA_CL_VLAN_CTRL_PORT_VOE_DEFAULT_DEI, x)
+
+#define ANA_CL_VLAN_CTRL_VLAN_PCP_DEI_TRANS_ENA  BIT(21)
+#define ANA_CL_VLAN_CTRL_VLAN_PCP_DEI_TRANS_ENA_SET(x)\
+       FIELD_PREP(ANA_CL_VLAN_CTRL_VLAN_PCP_DEI_TRANS_ENA, x)
+#define ANA_CL_VLAN_CTRL_VLAN_PCP_DEI_TRANS_ENA_GET(x)\
+       FIELD_GET(ANA_CL_VLAN_CTRL_VLAN_PCP_DEI_TRANS_ENA, x)
+
+#define ANA_CL_VLAN_CTRL_VLAN_TAG_SEL            BIT(20)
+#define ANA_CL_VLAN_CTRL_VLAN_TAG_SEL_SET(x)\
+       FIELD_PREP(ANA_CL_VLAN_CTRL_VLAN_TAG_SEL, x)
+#define ANA_CL_VLAN_CTRL_VLAN_TAG_SEL_GET(x)\
+       FIELD_GET(ANA_CL_VLAN_CTRL_VLAN_TAG_SEL, x)
+
+#define ANA_CL_VLAN_CTRL_VLAN_AWARE_ENA          BIT(19)
+#define ANA_CL_VLAN_CTRL_VLAN_AWARE_ENA_SET(x)\
+       FIELD_PREP(ANA_CL_VLAN_CTRL_VLAN_AWARE_ENA, x)
+#define ANA_CL_VLAN_CTRL_VLAN_AWARE_ENA_GET(x)\
+       FIELD_GET(ANA_CL_VLAN_CTRL_VLAN_AWARE_ENA, x)
+
+#define ANA_CL_VLAN_CTRL_VLAN_POP_CNT            GENMASK(18, 17)
+#define ANA_CL_VLAN_CTRL_VLAN_POP_CNT_SET(x)\
+       FIELD_PREP(ANA_CL_VLAN_CTRL_VLAN_POP_CNT, x)
+#define ANA_CL_VLAN_CTRL_VLAN_POP_CNT_GET(x)\
+       FIELD_GET(ANA_CL_VLAN_CTRL_VLAN_POP_CNT, x)
+
+#define ANA_CL_VLAN_CTRL_PORT_TAG_TYPE           BIT(16)
+#define ANA_CL_VLAN_CTRL_PORT_TAG_TYPE_SET(x)\
+       FIELD_PREP(ANA_CL_VLAN_CTRL_PORT_TAG_TYPE, x)
+#define ANA_CL_VLAN_CTRL_PORT_TAG_TYPE_GET(x)\
+       FIELD_GET(ANA_CL_VLAN_CTRL_PORT_TAG_TYPE, x)
+
+#define ANA_CL_VLAN_CTRL_PORT_PCP                GENMASK(15, 13)
+#define ANA_CL_VLAN_CTRL_PORT_PCP_SET(x)\
+       FIELD_PREP(ANA_CL_VLAN_CTRL_PORT_PCP, x)
+#define ANA_CL_VLAN_CTRL_PORT_PCP_GET(x)\
+       FIELD_GET(ANA_CL_VLAN_CTRL_PORT_PCP, x)
+
+#define ANA_CL_VLAN_CTRL_PORT_DEI                BIT(12)
+#define ANA_CL_VLAN_CTRL_PORT_DEI_SET(x)\
+       FIELD_PREP(ANA_CL_VLAN_CTRL_PORT_DEI, x)
+#define ANA_CL_VLAN_CTRL_PORT_DEI_GET(x)\
+       FIELD_GET(ANA_CL_VLAN_CTRL_PORT_DEI, x)
+
+#define ANA_CL_VLAN_CTRL_PORT_VID                GENMASK(11, 0)
+#define ANA_CL_VLAN_CTRL_PORT_VID_SET(x)\
+       FIELD_PREP(ANA_CL_VLAN_CTRL_PORT_VID, x)
+#define ANA_CL_VLAN_CTRL_PORT_VID_GET(x)\
+       FIELD_GET(ANA_CL_VLAN_CTRL_PORT_VID, x)
+
+/*      ANA_CL:PORT:VLAN_CTRL_2 */
+#define ANA_CL_VLAN_CTRL_2(g)     __REG(TARGET_ANA_CL, 0, 1, 131072, g, 70, 512, 36, 0, 1, 4)
+
+#define ANA_CL_VLAN_CTRL_2_VLAN_PUSH_CNT         GENMASK(1, 0)
+#define ANA_CL_VLAN_CTRL_2_VLAN_PUSH_CNT_SET(x)\
+       FIELD_PREP(ANA_CL_VLAN_CTRL_2_VLAN_PUSH_CNT, x)
+#define ANA_CL_VLAN_CTRL_2_VLAN_PUSH_CNT_GET(x)\
+       FIELD_GET(ANA_CL_VLAN_CTRL_2_VLAN_PUSH_CNT, x)
+
+/*      ANA_CL:PORT:CAPTURE_BPDU_CFG */
+#define ANA_CL_CAPTURE_BPDU_CFG(g) __REG(TARGET_ANA_CL, 0, 1, 131072, g, 70, 512, 196, 0, 1, 4)
+
+/*      ANA_CL:COMMON:OWN_UPSID */
+#define ANA_CL_OWN_UPSID(r)       __REG(TARGET_ANA_CL, 0, 1, 166912, 0, 1, 756, 0, r, 3, 4)
+
+#define ANA_CL_OWN_UPSID_OWN_UPSID               GENMASK(4, 0)
+#define ANA_CL_OWN_UPSID_OWN_UPSID_SET(x)\
+       FIELD_PREP(ANA_CL_OWN_UPSID_OWN_UPSID, x)
+#define ANA_CL_OWN_UPSID_OWN_UPSID_GET(x)\
+       FIELD_GET(ANA_CL_OWN_UPSID_OWN_UPSID, x)
+
+/*      ANA_L2:COMMON:AUTO_LRN_CFG */
+#define ANA_L2_AUTO_LRN_CFG       __REG(TARGET_ANA_L2, 0, 1, 566024, 0, 1, 700, 24, 0, 1, 4)
+
+/*      ANA_L2:COMMON:AUTO_LRN_CFG1 */
+#define ANA_L2_AUTO_LRN_CFG1      __REG(TARGET_ANA_L2, 0, 1, 566024, 0, 1, 700, 28, 0, 1, 4)
+
+/*      ANA_L2:COMMON:AUTO_LRN_CFG2 */
+#define ANA_L2_AUTO_LRN_CFG2      __REG(TARGET_ANA_L2, 0, 1, 566024, 0, 1, 700, 32, 0, 1, 4)
+
+#define ANA_L2_AUTO_LRN_CFG2_AUTO_LRN_ENA2       BIT(0)
+#define ANA_L2_AUTO_LRN_CFG2_AUTO_LRN_ENA2_SET(x)\
+       FIELD_PREP(ANA_L2_AUTO_LRN_CFG2_AUTO_LRN_ENA2, x)
+#define ANA_L2_AUTO_LRN_CFG2_AUTO_LRN_ENA2_GET(x)\
+       FIELD_GET(ANA_L2_AUTO_LRN_CFG2_AUTO_LRN_ENA2, x)
+
+/*      ANA_L2:COMMON:OWN_UPSID */
+#define ANA_L2_OWN_UPSID(r)       __REG(TARGET_ANA_L2, 0, 1, 566024, 0, 1, 700, 672, r, 3, 4)
+
+#define ANA_L2_OWN_UPSID_OWN_UPSID               GENMASK(4, 0)
+#define ANA_L2_OWN_UPSID_OWN_UPSID_SET(x)\
+       FIELD_PREP(ANA_L2_OWN_UPSID_OWN_UPSID, x)
+#define ANA_L2_OWN_UPSID_OWN_UPSID_GET(x)\
+       FIELD_GET(ANA_L2_OWN_UPSID_OWN_UPSID, x)
+
+/*      ANA_L3:COMMON:VLAN_CTRL */
+#define ANA_L3_VLAN_CTRL          __REG(TARGET_ANA_L3, 0, 1, 493632, 0, 1, 184, 4, 0, 1, 4)
+
+#define ANA_L3_VLAN_CTRL_VLAN_ENA                BIT(0)
+#define ANA_L3_VLAN_CTRL_VLAN_ENA_SET(x)\
+       FIELD_PREP(ANA_L3_VLAN_CTRL_VLAN_ENA, x)
+#define ANA_L3_VLAN_CTRL_VLAN_ENA_GET(x)\
+       FIELD_GET(ANA_L3_VLAN_CTRL_VLAN_ENA, x)
+
+/*      ANA_L3:VLAN:VLAN_CFG */
+#define ANA_L3_VLAN_CFG(g)        __REG(TARGET_ANA_L3, 0, 1, 0, g, 5120, 64, 8, 0, 1, 4)
+
+#define ANA_L3_VLAN_CFG_VLAN_MSTP_PTR            GENMASK(30, 24)
+#define ANA_L3_VLAN_CFG_VLAN_MSTP_PTR_SET(x)\
+       FIELD_PREP(ANA_L3_VLAN_CFG_VLAN_MSTP_PTR, x)
+#define ANA_L3_VLAN_CFG_VLAN_MSTP_PTR_GET(x)\
+       FIELD_GET(ANA_L3_VLAN_CFG_VLAN_MSTP_PTR, x)
+
+#define ANA_L3_VLAN_CFG_VLAN_FID                 GENMASK(20, 8)
+#define ANA_L3_VLAN_CFG_VLAN_FID_SET(x)\
+       FIELD_PREP(ANA_L3_VLAN_CFG_VLAN_FID, x)
+#define ANA_L3_VLAN_CFG_VLAN_FID_GET(x)\
+       FIELD_GET(ANA_L3_VLAN_CFG_VLAN_FID, x)
+
+#define ANA_L3_VLAN_CFG_VLAN_IGR_FILTER_ENA      BIT(6)
+#define ANA_L3_VLAN_CFG_VLAN_IGR_FILTER_ENA_SET(x)\
+       FIELD_PREP(ANA_L3_VLAN_CFG_VLAN_IGR_FILTER_ENA, x)
+#define ANA_L3_VLAN_CFG_VLAN_IGR_FILTER_ENA_GET(x)\
+       FIELD_GET(ANA_L3_VLAN_CFG_VLAN_IGR_FILTER_ENA, x)
+
+#define ANA_L3_VLAN_CFG_VLAN_SEC_FWD_ENA         BIT(5)
+#define ANA_L3_VLAN_CFG_VLAN_SEC_FWD_ENA_SET(x)\
+       FIELD_PREP(ANA_L3_VLAN_CFG_VLAN_SEC_FWD_ENA, x)
+#define ANA_L3_VLAN_CFG_VLAN_SEC_FWD_ENA_GET(x)\
+       FIELD_GET(ANA_L3_VLAN_CFG_VLAN_SEC_FWD_ENA, x)
+
+#define ANA_L3_VLAN_CFG_VLAN_FLOOD_DIS           BIT(4)
+#define ANA_L3_VLAN_CFG_VLAN_FLOOD_DIS_SET(x)\
+       FIELD_PREP(ANA_L3_VLAN_CFG_VLAN_FLOOD_DIS, x)
+#define ANA_L3_VLAN_CFG_VLAN_FLOOD_DIS_GET(x)\
+       FIELD_GET(ANA_L3_VLAN_CFG_VLAN_FLOOD_DIS, x)
+
+#define ANA_L3_VLAN_CFG_VLAN_LRN_DIS             BIT(3)
+#define ANA_L3_VLAN_CFG_VLAN_LRN_DIS_SET(x)\
+       FIELD_PREP(ANA_L3_VLAN_CFG_VLAN_LRN_DIS, x)
+#define ANA_L3_VLAN_CFG_VLAN_LRN_DIS_GET(x)\
+       FIELD_GET(ANA_L3_VLAN_CFG_VLAN_LRN_DIS, x)
+
+#define ANA_L3_VLAN_CFG_VLAN_RLEG_ENA            BIT(2)
+#define ANA_L3_VLAN_CFG_VLAN_RLEG_ENA_SET(x)\
+       FIELD_PREP(ANA_L3_VLAN_CFG_VLAN_RLEG_ENA, x)
+#define ANA_L3_VLAN_CFG_VLAN_RLEG_ENA_GET(x)\
+       FIELD_GET(ANA_L3_VLAN_CFG_VLAN_RLEG_ENA, x)
+
+#define ANA_L3_VLAN_CFG_VLAN_PRIVATE_ENA         BIT(1)
+#define ANA_L3_VLAN_CFG_VLAN_PRIVATE_ENA_SET(x)\
+       FIELD_PREP(ANA_L3_VLAN_CFG_VLAN_PRIVATE_ENA, x)
+#define ANA_L3_VLAN_CFG_VLAN_PRIVATE_ENA_GET(x)\
+       FIELD_GET(ANA_L3_VLAN_CFG_VLAN_PRIVATE_ENA, x)
+
+#define ANA_L3_VLAN_CFG_VLAN_MIRROR_ENA          BIT(0)
+#define ANA_L3_VLAN_CFG_VLAN_MIRROR_ENA_SET(x)\
+       FIELD_PREP(ANA_L3_VLAN_CFG_VLAN_MIRROR_ENA, x)
+#define ANA_L3_VLAN_CFG_VLAN_MIRROR_ENA_GET(x)\
+       FIELD_GET(ANA_L3_VLAN_CFG_VLAN_MIRROR_ENA, x)
+
+/*      ANA_L3:VLAN:VLAN_MASK_CFG */
+#define ANA_L3_VLAN_MASK_CFG(g)   __REG(TARGET_ANA_L3, 0, 1, 0, g, 5120, 64, 16, 0, 1, 4)
+
+/*      ANA_L3:VLAN:VLAN_MASK_CFG1 */
+#define ANA_L3_VLAN_MASK_CFG1(g)  __REG(TARGET_ANA_L3, 0, 1, 0, g, 5120, 64, 20, 0, 1, 4)
+
+/*      ANA_L3:VLAN:VLAN_MASK_CFG2 */
+#define ANA_L3_VLAN_MASK_CFG2(g)  __REG(TARGET_ANA_L3, 0, 1, 0, g, 5120, 64, 24, 0, 1, 4)
+
+#define ANA_L3_VLAN_MASK_CFG2_VLAN_PORT_MASK2    BIT(0)
+#define ANA_L3_VLAN_MASK_CFG2_VLAN_PORT_MASK2_SET(x)\
+       FIELD_PREP(ANA_L3_VLAN_MASK_CFG2_VLAN_PORT_MASK2, x)
+#define ANA_L3_VLAN_MASK_CFG2_VLAN_PORT_MASK2_GET(x)\
+       FIELD_GET(ANA_L3_VLAN_MASK_CFG2_VLAN_PORT_MASK2, x)
+
+/*      ASM:DEV_STATISTICS:RX_IN_BYTES_CNT */
+#define ASM_RX_IN_BYTES_CNT(g)    __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 0, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:RX_SYMBOL_ERR_CNT */
+#define ASM_RX_SYMBOL_ERR_CNT(g)  __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 4, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:RX_PAUSE_CNT */
+#define ASM_RX_PAUSE_CNT(g)       __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 8, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:RX_UNSUP_OPCODE_CNT */
+#define ASM_RX_UNSUP_OPCODE_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 12, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:RX_OK_BYTES_CNT */
+#define ASM_RX_OK_BYTES_CNT(g)    __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 16, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:RX_BAD_BYTES_CNT */
+#define ASM_RX_BAD_BYTES_CNT(g)   __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 20, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:RX_UC_CNT */
+#define ASM_RX_UC_CNT(g)          __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 24, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:RX_MC_CNT */
+#define ASM_RX_MC_CNT(g)          __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 28, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:RX_BC_CNT */
+#define ASM_RX_BC_CNT(g)          __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 32, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:RX_CRC_ERR_CNT */
+#define ASM_RX_CRC_ERR_CNT(g)     __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 36, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:RX_UNDERSIZE_CNT */
+#define ASM_RX_UNDERSIZE_CNT(g)   __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 40, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:RX_FRAGMENTS_CNT */
+#define ASM_RX_FRAGMENTS_CNT(g)   __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 44, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:RX_IN_RANGE_LEN_ERR_CNT */
+#define ASM_RX_IN_RANGE_LEN_ERR_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 48, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:RX_OUT_OF_RANGE_LEN_ERR_CNT */
+#define ASM_RX_OUT_OF_RANGE_LEN_ERR_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 52, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:RX_OVERSIZE_CNT */
+#define ASM_RX_OVERSIZE_CNT(g)    __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 56, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:RX_JABBERS_CNT */
+#define ASM_RX_JABBERS_CNT(g)     __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 60, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:RX_SIZE64_CNT */
+#define ASM_RX_SIZE64_CNT(g)      __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 64, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:RX_SIZE65TO127_CNT */
+#define ASM_RX_SIZE65TO127_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 68, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:RX_SIZE128TO255_CNT */
+#define ASM_RX_SIZE128TO255_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 72, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:RX_SIZE256TO511_CNT */
+#define ASM_RX_SIZE256TO511_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 76, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:RX_SIZE512TO1023_CNT */
+#define ASM_RX_SIZE512TO1023_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 80, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:RX_SIZE1024TO1518_CNT */
+#define ASM_RX_SIZE1024TO1518_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 84, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:RX_SIZE1519TOMAX_CNT */
+#define ASM_RX_SIZE1519TOMAX_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 88, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:RX_IPG_SHRINK_CNT */
+#define ASM_RX_IPG_SHRINK_CNT(g)  __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 92, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:TX_OUT_BYTES_CNT */
+#define ASM_TX_OUT_BYTES_CNT(g)   __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 96, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:TX_PAUSE_CNT */
+#define ASM_TX_PAUSE_CNT(g)       __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 100, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:TX_OK_BYTES_CNT */
+#define ASM_TX_OK_BYTES_CNT(g)    __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 104, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:TX_UC_CNT */
+#define ASM_TX_UC_CNT(g)          __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 108, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:TX_MC_CNT */
+#define ASM_TX_MC_CNT(g)          __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 112, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:TX_BC_CNT */
+#define ASM_TX_BC_CNT(g)          __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 116, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:TX_SIZE64_CNT */
+#define ASM_TX_SIZE64_CNT(g)      __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 120, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:TX_SIZE65TO127_CNT */
+#define ASM_TX_SIZE65TO127_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 124, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:TX_SIZE128TO255_CNT */
+#define ASM_TX_SIZE128TO255_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 128, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:TX_SIZE256TO511_CNT */
+#define ASM_TX_SIZE256TO511_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 132, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:TX_SIZE512TO1023_CNT */
+#define ASM_TX_SIZE512TO1023_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 136, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:TX_SIZE1024TO1518_CNT */
+#define ASM_TX_SIZE1024TO1518_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 140, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:TX_SIZE1519TOMAX_CNT */
+#define ASM_TX_SIZE1519TOMAX_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 144, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:RX_ALIGNMENT_LOST_CNT */
+#define ASM_RX_ALIGNMENT_LOST_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 148, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:RX_TAGGED_FRMS_CNT */
+#define ASM_RX_TAGGED_FRMS_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 152, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:RX_UNTAGGED_FRMS_CNT */
+#define ASM_RX_UNTAGGED_FRMS_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 156, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:TX_TAGGED_FRMS_CNT */
+#define ASM_TX_TAGGED_FRMS_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 160, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:TX_UNTAGGED_FRMS_CNT */
+#define ASM_TX_UNTAGGED_FRMS_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 164, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:PMAC_RX_SYMBOL_ERR_CNT */
+#define ASM_PMAC_RX_SYMBOL_ERR_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 168, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:PMAC_RX_PAUSE_CNT */
+#define ASM_PMAC_RX_PAUSE_CNT(g)  __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 172, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:PMAC_RX_UNSUP_OPCODE_CNT */
+#define ASM_PMAC_RX_UNSUP_OPCODE_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 176, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:PMAC_RX_OK_BYTES_CNT */
+#define ASM_PMAC_RX_OK_BYTES_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 180, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:PMAC_RX_BAD_BYTES_CNT */
+#define ASM_PMAC_RX_BAD_BYTES_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 184, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:PMAC_RX_UC_CNT */
+#define ASM_PMAC_RX_UC_CNT(g)     __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 188, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:PMAC_RX_MC_CNT */
+#define ASM_PMAC_RX_MC_CNT(g)     __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 192, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:PMAC_RX_BC_CNT */
+#define ASM_PMAC_RX_BC_CNT(g)     __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 196, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:PMAC_RX_CRC_ERR_CNT */
+#define ASM_PMAC_RX_CRC_ERR_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 200, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:PMAC_RX_UNDERSIZE_CNT */
+#define ASM_PMAC_RX_UNDERSIZE_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 204, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:PMAC_RX_FRAGMENTS_CNT */
+#define ASM_PMAC_RX_FRAGMENTS_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 208, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:PMAC_RX_IN_RANGE_LEN_ERR_CNT */
+#define ASM_PMAC_RX_IN_RANGE_LEN_ERR_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 212, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:PMAC_RX_OUT_OF_RANGE_LEN_ERR_CNT */
+#define ASM_PMAC_RX_OUT_OF_RANGE_LEN_ERR_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 216, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:PMAC_RX_OVERSIZE_CNT */
+#define ASM_PMAC_RX_OVERSIZE_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 220, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:PMAC_RX_JABBERS_CNT */
+#define ASM_PMAC_RX_JABBERS_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 224, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:PMAC_RX_SIZE64_CNT */
+#define ASM_PMAC_RX_SIZE64_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 228, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:PMAC_RX_SIZE65TO127_CNT */
+#define ASM_PMAC_RX_SIZE65TO127_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 232, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:PMAC_RX_SIZE128TO255_CNT */
+#define ASM_PMAC_RX_SIZE128TO255_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 236, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:PMAC_RX_SIZE256TO511_CNT */
+#define ASM_PMAC_RX_SIZE256TO511_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 240, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:PMAC_RX_SIZE512TO1023_CNT */
+#define ASM_PMAC_RX_SIZE512TO1023_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 244, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:PMAC_RX_SIZE1024TO1518_CNT */
+#define ASM_PMAC_RX_SIZE1024TO1518_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 248, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:PMAC_RX_SIZE1519TOMAX_CNT */
+#define ASM_PMAC_RX_SIZE1519TOMAX_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 252, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:PMAC_TX_PAUSE_CNT */
+#define ASM_PMAC_TX_PAUSE_CNT(g)  __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 256, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:PMAC_TX_OK_BYTES_CNT */
+#define ASM_PMAC_TX_OK_BYTES_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 260, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:PMAC_TX_UC_CNT */
+#define ASM_PMAC_TX_UC_CNT(g)     __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 264, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:PMAC_TX_MC_CNT */
+#define ASM_PMAC_TX_MC_CNT(g)     __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 268, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:PMAC_TX_BC_CNT */
+#define ASM_PMAC_TX_BC_CNT(g)     __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 272, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:PMAC_TX_SIZE64_CNT */
+#define ASM_PMAC_TX_SIZE64_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 276, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:PMAC_TX_SIZE65TO127_CNT */
+#define ASM_PMAC_TX_SIZE65TO127_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 280, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:PMAC_TX_SIZE128TO255_CNT */
+#define ASM_PMAC_TX_SIZE128TO255_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 284, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:PMAC_TX_SIZE256TO511_CNT */
+#define ASM_PMAC_TX_SIZE256TO511_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 288, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:PMAC_TX_SIZE512TO1023_CNT */
+#define ASM_PMAC_TX_SIZE512TO1023_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 292, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:PMAC_TX_SIZE1024TO1518_CNT */
+#define ASM_PMAC_TX_SIZE1024TO1518_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 296, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:PMAC_TX_SIZE1519TOMAX_CNT */
+#define ASM_PMAC_TX_SIZE1519TOMAX_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 300, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:PMAC_RX_ALIGNMENT_LOST_CNT */
+#define ASM_PMAC_RX_ALIGNMENT_LOST_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 304, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:MM_RX_ASSEMBLY_ERR_CNT */
+#define ASM_MM_RX_ASSEMBLY_ERR_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 308, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:MM_RX_SMD_ERR_CNT */
+#define ASM_MM_RX_SMD_ERR_CNT(g)  __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 312, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:MM_RX_ASSEMBLY_OK_CNT */
+#define ASM_MM_RX_ASSEMBLY_OK_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 316, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:MM_RX_MERGE_FRAG_CNT */
+#define ASM_MM_RX_MERGE_FRAG_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 320, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:MM_TX_PFRAGMENT_CNT */
+#define ASM_MM_TX_PFRAGMENT_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 324, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:TX_MULTI_COLL_CNT */
+#define ASM_TX_MULTI_COLL_CNT(g)  __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 328, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:TX_LATE_COLL_CNT */
+#define ASM_TX_LATE_COLL_CNT(g)   __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 332, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:TX_XCOLL_CNT */
+#define ASM_TX_XCOLL_CNT(g)       __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 336, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:TX_DEFER_CNT */
+#define ASM_TX_DEFER_CNT(g)       __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 340, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:TX_XDEFER_CNT */
+#define ASM_TX_XDEFER_CNT(g)      __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 344, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:TX_BACKOFF1_CNT */
+#define ASM_TX_BACKOFF1_CNT(g)    __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 348, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:TX_CSENSE_CNT */
+#define ASM_TX_CSENSE_CNT(g)      __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 352, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:RX_IN_BYTES_MSB_CNT */
+#define ASM_RX_IN_BYTES_MSB_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 356, 0, 1, 4)
+
+#define ASM_RX_IN_BYTES_MSB_CNT_RX_IN_BYTES_MSB_CNT GENMASK(3, 0)
+#define ASM_RX_IN_BYTES_MSB_CNT_RX_IN_BYTES_MSB_CNT_SET(x)\
+       FIELD_PREP(ASM_RX_IN_BYTES_MSB_CNT_RX_IN_BYTES_MSB_CNT, x)
+#define ASM_RX_IN_BYTES_MSB_CNT_RX_IN_BYTES_MSB_CNT_GET(x)\
+       FIELD_GET(ASM_RX_IN_BYTES_MSB_CNT_RX_IN_BYTES_MSB_CNT, x)
+
+/*      ASM:DEV_STATISTICS:RX_OK_BYTES_MSB_CNT */
+#define ASM_RX_OK_BYTES_MSB_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 360, 0, 1, 4)
+
+#define ASM_RX_OK_BYTES_MSB_CNT_RX_OK_BYTES_MSB_CNT GENMASK(3, 0)
+#define ASM_RX_OK_BYTES_MSB_CNT_RX_OK_BYTES_MSB_CNT_SET(x)\
+       FIELD_PREP(ASM_RX_OK_BYTES_MSB_CNT_RX_OK_BYTES_MSB_CNT, x)
+#define ASM_RX_OK_BYTES_MSB_CNT_RX_OK_BYTES_MSB_CNT_GET(x)\
+       FIELD_GET(ASM_RX_OK_BYTES_MSB_CNT_RX_OK_BYTES_MSB_CNT, x)
+
+/*      ASM:DEV_STATISTICS:PMAC_RX_OK_BYTES_MSB_CNT */
+#define ASM_PMAC_RX_OK_BYTES_MSB_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 364, 0, 1, 4)
+
+#define ASM_PMAC_RX_OK_BYTES_MSB_CNT_PMAC_RX_OK_BYTES_MSB_CNT GENMASK(3, 0)
+#define ASM_PMAC_RX_OK_BYTES_MSB_CNT_PMAC_RX_OK_BYTES_MSB_CNT_SET(x)\
+       FIELD_PREP(ASM_PMAC_RX_OK_BYTES_MSB_CNT_PMAC_RX_OK_BYTES_MSB_CNT, x)
+#define ASM_PMAC_RX_OK_BYTES_MSB_CNT_PMAC_RX_OK_BYTES_MSB_CNT_GET(x)\
+       FIELD_GET(ASM_PMAC_RX_OK_BYTES_MSB_CNT_PMAC_RX_OK_BYTES_MSB_CNT, x)
+
+/*      ASM:DEV_STATISTICS:RX_BAD_BYTES_MSB_CNT */
+#define ASM_RX_BAD_BYTES_MSB_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 368, 0, 1, 4)
+
+#define ASM_RX_BAD_BYTES_MSB_CNT_RX_BAD_BYTES_MSB_CNT GENMASK(3, 0)
+#define ASM_RX_BAD_BYTES_MSB_CNT_RX_BAD_BYTES_MSB_CNT_SET(x)\
+       FIELD_PREP(ASM_RX_BAD_BYTES_MSB_CNT_RX_BAD_BYTES_MSB_CNT, x)
+#define ASM_RX_BAD_BYTES_MSB_CNT_RX_BAD_BYTES_MSB_CNT_GET(x)\
+       FIELD_GET(ASM_RX_BAD_BYTES_MSB_CNT_RX_BAD_BYTES_MSB_CNT, x)
+
+/*      ASM:DEV_STATISTICS:PMAC_RX_BAD_BYTES_MSB_CNT */
+#define ASM_PMAC_RX_BAD_BYTES_MSB_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 372, 0, 1, 4)
+
+#define ASM_PMAC_RX_BAD_BYTES_MSB_CNT_PMAC_RX_BAD_BYTES_MSB_CNT GENMASK(3, 0)
+#define ASM_PMAC_RX_BAD_BYTES_MSB_CNT_PMAC_RX_BAD_BYTES_MSB_CNT_SET(x)\
+       FIELD_PREP(ASM_PMAC_RX_BAD_BYTES_MSB_CNT_PMAC_RX_BAD_BYTES_MSB_CNT, x)
+#define ASM_PMAC_RX_BAD_BYTES_MSB_CNT_PMAC_RX_BAD_BYTES_MSB_CNT_GET(x)\
+       FIELD_GET(ASM_PMAC_RX_BAD_BYTES_MSB_CNT_PMAC_RX_BAD_BYTES_MSB_CNT, x)
+
+/*      ASM:DEV_STATISTICS:TX_OUT_BYTES_MSB_CNT */
+#define ASM_TX_OUT_BYTES_MSB_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 376, 0, 1, 4)
+
+#define ASM_TX_OUT_BYTES_MSB_CNT_TX_OUT_BYTES_MSB_CNT GENMASK(3, 0)
+#define ASM_TX_OUT_BYTES_MSB_CNT_TX_OUT_BYTES_MSB_CNT_SET(x)\
+       FIELD_PREP(ASM_TX_OUT_BYTES_MSB_CNT_TX_OUT_BYTES_MSB_CNT, x)
+#define ASM_TX_OUT_BYTES_MSB_CNT_TX_OUT_BYTES_MSB_CNT_GET(x)\
+       FIELD_GET(ASM_TX_OUT_BYTES_MSB_CNT_TX_OUT_BYTES_MSB_CNT, x)
+
+/*      ASM:DEV_STATISTICS:TX_OK_BYTES_MSB_CNT */
+#define ASM_TX_OK_BYTES_MSB_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 380, 0, 1, 4)
+
+#define ASM_TX_OK_BYTES_MSB_CNT_TX_OK_BYTES_MSB_CNT GENMASK(3, 0)
+#define ASM_TX_OK_BYTES_MSB_CNT_TX_OK_BYTES_MSB_CNT_SET(x)\
+       FIELD_PREP(ASM_TX_OK_BYTES_MSB_CNT_TX_OK_BYTES_MSB_CNT, x)
+#define ASM_TX_OK_BYTES_MSB_CNT_TX_OK_BYTES_MSB_CNT_GET(x)\
+       FIELD_GET(ASM_TX_OK_BYTES_MSB_CNT_TX_OK_BYTES_MSB_CNT, x)
+
+/*      ASM:DEV_STATISTICS:PMAC_TX_OK_BYTES_MSB_CNT */
+#define ASM_PMAC_TX_OK_BYTES_MSB_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 384, 0, 1, 4)
+
+#define ASM_PMAC_TX_OK_BYTES_MSB_CNT_PMAC_TX_OK_BYTES_MSB_CNT GENMASK(3, 0)
+#define ASM_PMAC_TX_OK_BYTES_MSB_CNT_PMAC_TX_OK_BYTES_MSB_CNT_SET(x)\
+       FIELD_PREP(ASM_PMAC_TX_OK_BYTES_MSB_CNT_PMAC_TX_OK_BYTES_MSB_CNT, x)
+#define ASM_PMAC_TX_OK_BYTES_MSB_CNT_PMAC_TX_OK_BYTES_MSB_CNT_GET(x)\
+       FIELD_GET(ASM_PMAC_TX_OK_BYTES_MSB_CNT_PMAC_TX_OK_BYTES_MSB_CNT, x)
+
+/*      ASM:DEV_STATISTICS:RX_SYNC_LOST_ERR_CNT */
+#define ASM_RX_SYNC_LOST_ERR_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 388, 0, 1, 4)
+
+/*      ASM:CFG:STAT_CFG */
+#define ASM_STAT_CFG              __REG(TARGET_ASM, 0, 1, 33280, 0, 1, 1088, 0, 0, 1, 4)
+
+#define ASM_STAT_CFG_STAT_CNT_CLR_SHOT           BIT(0)
+#define ASM_STAT_CFG_STAT_CNT_CLR_SHOT_SET(x)\
+       FIELD_PREP(ASM_STAT_CFG_STAT_CNT_CLR_SHOT, x)
+#define ASM_STAT_CFG_STAT_CNT_CLR_SHOT_GET(x)\
+       FIELD_GET(ASM_STAT_CFG_STAT_CNT_CLR_SHOT, x)
+
+/*      ASM:CFG:PORT_CFG */
+#define ASM_PORT_CFG(r)           __REG(TARGET_ASM, 0, 1, 33280, 0, 1, 1088, 540, r, 67, 4)
+
+#define ASM_PORT_CFG_CSC_STAT_DIS                BIT(12)
+#define ASM_PORT_CFG_CSC_STAT_DIS_SET(x)\
+       FIELD_PREP(ASM_PORT_CFG_CSC_STAT_DIS, x)
+#define ASM_PORT_CFG_CSC_STAT_DIS_GET(x)\
+       FIELD_GET(ASM_PORT_CFG_CSC_STAT_DIS, x)
+
+#define ASM_PORT_CFG_HIH_AFTER_PREAMBLE_ENA      BIT(11)
+#define ASM_PORT_CFG_HIH_AFTER_PREAMBLE_ENA_SET(x)\
+       FIELD_PREP(ASM_PORT_CFG_HIH_AFTER_PREAMBLE_ENA, x)
+#define ASM_PORT_CFG_HIH_AFTER_PREAMBLE_ENA_GET(x)\
+       FIELD_GET(ASM_PORT_CFG_HIH_AFTER_PREAMBLE_ENA, x)
+
+#define ASM_PORT_CFG_IGN_TAXI_ABORT_ENA          BIT(10)
+#define ASM_PORT_CFG_IGN_TAXI_ABORT_ENA_SET(x)\
+       FIELD_PREP(ASM_PORT_CFG_IGN_TAXI_ABORT_ENA, x)
+#define ASM_PORT_CFG_IGN_TAXI_ABORT_ENA_GET(x)\
+       FIELD_GET(ASM_PORT_CFG_IGN_TAXI_ABORT_ENA, x)
+
+#define ASM_PORT_CFG_NO_PREAMBLE_ENA             BIT(9)
+#define ASM_PORT_CFG_NO_PREAMBLE_ENA_SET(x)\
+       FIELD_PREP(ASM_PORT_CFG_NO_PREAMBLE_ENA, x)
+#define ASM_PORT_CFG_NO_PREAMBLE_ENA_GET(x)\
+       FIELD_GET(ASM_PORT_CFG_NO_PREAMBLE_ENA, x)
+
+#define ASM_PORT_CFG_SKIP_PREAMBLE_ENA           BIT(8)
+#define ASM_PORT_CFG_SKIP_PREAMBLE_ENA_SET(x)\
+       FIELD_PREP(ASM_PORT_CFG_SKIP_PREAMBLE_ENA, x)
+#define ASM_PORT_CFG_SKIP_PREAMBLE_ENA_GET(x)\
+       FIELD_GET(ASM_PORT_CFG_SKIP_PREAMBLE_ENA, x)
+
+#define ASM_PORT_CFG_FRM_AGING_DIS               BIT(7)
+#define ASM_PORT_CFG_FRM_AGING_DIS_SET(x)\
+       FIELD_PREP(ASM_PORT_CFG_FRM_AGING_DIS, x)
+#define ASM_PORT_CFG_FRM_AGING_DIS_GET(x)\
+       FIELD_GET(ASM_PORT_CFG_FRM_AGING_DIS, x)
+
+#define ASM_PORT_CFG_PAD_ENA                     BIT(6)
+#define ASM_PORT_CFG_PAD_ENA_SET(x)\
+       FIELD_PREP(ASM_PORT_CFG_PAD_ENA, x)
+#define ASM_PORT_CFG_PAD_ENA_GET(x)\
+       FIELD_GET(ASM_PORT_CFG_PAD_ENA, x)
+
+#define ASM_PORT_CFG_INJ_DISCARD_CFG             GENMASK(5, 4)
+#define ASM_PORT_CFG_INJ_DISCARD_CFG_SET(x)\
+       FIELD_PREP(ASM_PORT_CFG_INJ_DISCARD_CFG, x)
+#define ASM_PORT_CFG_INJ_DISCARD_CFG_GET(x)\
+       FIELD_GET(ASM_PORT_CFG_INJ_DISCARD_CFG, x)
+
+#define ASM_PORT_CFG_INJ_FORMAT_CFG              GENMASK(3, 2)
+#define ASM_PORT_CFG_INJ_FORMAT_CFG_SET(x)\
+       FIELD_PREP(ASM_PORT_CFG_INJ_FORMAT_CFG, x)
+#define ASM_PORT_CFG_INJ_FORMAT_CFG_GET(x)\
+       FIELD_GET(ASM_PORT_CFG_INJ_FORMAT_CFG, x)
+
+#define ASM_PORT_CFG_VSTAX2_AWR_ENA              BIT(1)
+#define ASM_PORT_CFG_VSTAX2_AWR_ENA_SET(x)\
+       FIELD_PREP(ASM_PORT_CFG_VSTAX2_AWR_ENA, x)
+#define ASM_PORT_CFG_VSTAX2_AWR_ENA_GET(x)\
+       FIELD_GET(ASM_PORT_CFG_VSTAX2_AWR_ENA, x)
+
+#define ASM_PORT_CFG_PFRM_FLUSH                  BIT(0)
+#define ASM_PORT_CFG_PFRM_FLUSH_SET(x)\
+       FIELD_PREP(ASM_PORT_CFG_PFRM_FLUSH, x)
+#define ASM_PORT_CFG_PFRM_FLUSH_GET(x)\
+       FIELD_GET(ASM_PORT_CFG_PFRM_FLUSH, x)
+
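+/* Editor's sketch of the intended call style; the spx5_rmw()
+ * read-modify-write helper and the fact that __REG() expands to its
+ * raw argument list are assumptions from the surrounding driver, not
+ * shown in this hunk. The address macro supplies the trailing
+ * arguments, e.g. to disable padding on one port:
+ *
+ *   spx5_rmw(ASM_PORT_CFG_PAD_ENA_SET(0),
+ *            ASM_PORT_CFG_PAD_ENA,
+ *            sparx5, ASM_PORT_CFG(portno));
+ */
+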
+/*      ASM:RAM_CTRL:RAM_INIT */
+#define ASM_RAM_INIT              __REG(TARGET_ASM, 0, 1, 34832, 0, 1, 4, 0, 0, 1, 4)
+
+#define ASM_RAM_INIT_RAM_INIT                    BIT(1)
+#define ASM_RAM_INIT_RAM_INIT_SET(x)\
+       FIELD_PREP(ASM_RAM_INIT_RAM_INIT, x)
+#define ASM_RAM_INIT_RAM_INIT_GET(x)\
+       FIELD_GET(ASM_RAM_INIT_RAM_INIT, x)
+
+#define ASM_RAM_INIT_RAM_CFG_HOOK                BIT(0)
+#define ASM_RAM_INIT_RAM_CFG_HOOK_SET(x)\
+       FIELD_PREP(ASM_RAM_INIT_RAM_CFG_HOOK, x)
+#define ASM_RAM_INIT_RAM_CFG_HOOK_GET(x)\
+       FIELD_GET(ASM_RAM_INIT_RAM_CFG_HOOK, x)
+
+/*      CLKGEN:LCPLL1:LCPLL1_CORE_CLK_CFG */
+#define CLKGEN_LCPLL1_CORE_CLK_CFG __REG(TARGET_CLKGEN, 0, 1, 12, 0, 1, 36, 0, 0, 1, 4)
+
+#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_DIV  GENMASK(7, 0)
+#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_DIV_SET(x)\
+       FIELD_PREP(CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_DIV, x)
+#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_DIV_GET(x)\
+       FIELD_GET(CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_DIV, x)
+
+#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_PRE_DIV  GENMASK(10, 8)
+#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_PRE_DIV_SET(x)\
+       FIELD_PREP(CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_PRE_DIV, x)
+#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_PRE_DIV_GET(x)\
+       FIELD_GET(CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_PRE_DIV, x)
+
+#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_DIR  BIT(11)
+#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_DIR_SET(x)\
+       FIELD_PREP(CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_DIR, x)
+#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_DIR_GET(x)\
+       FIELD_GET(CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_DIR, x)
+
+#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_SEL  GENMASK(13, 12)
+#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_SEL_SET(x)\
+       FIELD_PREP(CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_SEL, x)
+#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_SEL_GET(x)\
+       FIELD_GET(CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_SEL, x)
+
+#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_ENA  BIT(14)
+#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_ENA_SET(x)\
+       FIELD_PREP(CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_ENA, x)
+#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_ENA_GET(x)\
+       FIELD_GET(CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_ENA, x)
+
+#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_ENA  BIT(15)
+#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_ENA_SET(x)\
+       FIELD_PREP(CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_ENA, x)
+#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_ENA_GET(x)\
+       FIELD_GET(CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_ENA, x)
+
+/*      CPU:CPU_REGS:PROC_CTRL */
+#define CPU_PROC_CTRL             __REG(TARGET_CPU, 0, 1, 0, 0, 1, 204, 176, 0, 1, 4)
+
+#define CPU_PROC_CTRL_AARCH64_MODE_ENA           BIT(12)
+#define CPU_PROC_CTRL_AARCH64_MODE_ENA_SET(x)\
+       FIELD_PREP(CPU_PROC_CTRL_AARCH64_MODE_ENA, x)
+#define CPU_PROC_CTRL_AARCH64_MODE_ENA_GET(x)\
+       FIELD_GET(CPU_PROC_CTRL_AARCH64_MODE_ENA, x)
+
+#define CPU_PROC_CTRL_L2_RST_INVALIDATE_DIS      BIT(11)
+#define CPU_PROC_CTRL_L2_RST_INVALIDATE_DIS_SET(x)\
+       FIELD_PREP(CPU_PROC_CTRL_L2_RST_INVALIDATE_DIS, x)
+#define CPU_PROC_CTRL_L2_RST_INVALIDATE_DIS_GET(x)\
+       FIELD_GET(CPU_PROC_CTRL_L2_RST_INVALIDATE_DIS, x)
+
+#define CPU_PROC_CTRL_L1_RST_INVALIDATE_DIS      BIT(10)
+#define CPU_PROC_CTRL_L1_RST_INVALIDATE_DIS_SET(x)\
+       FIELD_PREP(CPU_PROC_CTRL_L1_RST_INVALIDATE_DIS, x)
+#define CPU_PROC_CTRL_L1_RST_INVALIDATE_DIS_GET(x)\
+       FIELD_GET(CPU_PROC_CTRL_L1_RST_INVALIDATE_DIS, x)
+
+#define CPU_PROC_CTRL_BE_EXCEP_MODE              BIT(9)
+#define CPU_PROC_CTRL_BE_EXCEP_MODE_SET(x)\
+       FIELD_PREP(CPU_PROC_CTRL_BE_EXCEP_MODE, x)
+#define CPU_PROC_CTRL_BE_EXCEP_MODE_GET(x)\
+       FIELD_GET(CPU_PROC_CTRL_BE_EXCEP_MODE, x)
+
+#define CPU_PROC_CTRL_VINITHI                    BIT(8)
+#define CPU_PROC_CTRL_VINITHI_SET(x)\
+       FIELD_PREP(CPU_PROC_CTRL_VINITHI, x)
+#define CPU_PROC_CTRL_VINITHI_GET(x)\
+       FIELD_GET(CPU_PROC_CTRL_VINITHI, x)
+
+#define CPU_PROC_CTRL_CFGTE                      BIT(7)
+#define CPU_PROC_CTRL_CFGTE_SET(x)\
+       FIELD_PREP(CPU_PROC_CTRL_CFGTE, x)
+#define CPU_PROC_CTRL_CFGTE_GET(x)\
+       FIELD_GET(CPU_PROC_CTRL_CFGTE, x)
+
+#define CPU_PROC_CTRL_CP15S_DISABLE              BIT(6)
+#define CPU_PROC_CTRL_CP15S_DISABLE_SET(x)\
+       FIELD_PREP(CPU_PROC_CTRL_CP15S_DISABLE, x)
+#define CPU_PROC_CTRL_CP15S_DISABLE_GET(x)\
+       FIELD_GET(CPU_PROC_CTRL_CP15S_DISABLE, x)
+
+#define CPU_PROC_CTRL_PROC_CRYPTO_DISABLE        BIT(5)
+#define CPU_PROC_CTRL_PROC_CRYPTO_DISABLE_SET(x)\
+       FIELD_PREP(CPU_PROC_CTRL_PROC_CRYPTO_DISABLE, x)
+#define CPU_PROC_CTRL_PROC_CRYPTO_DISABLE_GET(x)\
+       FIELD_GET(CPU_PROC_CTRL_PROC_CRYPTO_DISABLE, x)
+
+#define CPU_PROC_CTRL_ACP_CACHE_FORCE_ENA        BIT(4)
+#define CPU_PROC_CTRL_ACP_CACHE_FORCE_ENA_SET(x)\
+       FIELD_PREP(CPU_PROC_CTRL_ACP_CACHE_FORCE_ENA, x)
+#define CPU_PROC_CTRL_ACP_CACHE_FORCE_ENA_GET(x)\
+       FIELD_GET(CPU_PROC_CTRL_ACP_CACHE_FORCE_ENA, x)
+
+#define CPU_PROC_CTRL_ACP_AWCACHE                BIT(3)
+#define CPU_PROC_CTRL_ACP_AWCACHE_SET(x)\
+       FIELD_PREP(CPU_PROC_CTRL_ACP_AWCACHE, x)
+#define CPU_PROC_CTRL_ACP_AWCACHE_GET(x)\
+       FIELD_GET(CPU_PROC_CTRL_ACP_AWCACHE, x)
+
+#define CPU_PROC_CTRL_ACP_ARCACHE                BIT(2)
+#define CPU_PROC_CTRL_ACP_ARCACHE_SET(x)\
+       FIELD_PREP(CPU_PROC_CTRL_ACP_ARCACHE, x)
+#define CPU_PROC_CTRL_ACP_ARCACHE_GET(x)\
+       FIELD_GET(CPU_PROC_CTRL_ACP_ARCACHE, x)
+
+#define CPU_PROC_CTRL_L2_FLUSH_REQ               BIT(1)
+#define CPU_PROC_CTRL_L2_FLUSH_REQ_SET(x)\
+       FIELD_PREP(CPU_PROC_CTRL_L2_FLUSH_REQ, x)
+#define CPU_PROC_CTRL_L2_FLUSH_REQ_GET(x)\
+       FIELD_GET(CPU_PROC_CTRL_L2_FLUSH_REQ, x)
+
+#define CPU_PROC_CTRL_ACP_DISABLE                BIT(0)
+#define CPU_PROC_CTRL_ACP_DISABLE_SET(x)\
+       FIELD_PREP(CPU_PROC_CTRL_ACP_DISABLE, x)
+#define CPU_PROC_CTRL_ACP_DISABLE_GET(x)\
+       FIELD_GET(CPU_PROC_CTRL_ACP_DISABLE, x)
+
+/*      DEV10G:MAC_CFG_STATUS:MAC_ENA_CFG */
+#define DEV10G_MAC_ENA_CFG(t)     __REG(TARGET_DEV10G, t, 12, 0, 0, 1, 60, 0, 0, 1, 4)
+
+#define DEV10G_MAC_ENA_CFG_RX_ENA                BIT(4)
+#define DEV10G_MAC_ENA_CFG_RX_ENA_SET(x)\
+       FIELD_PREP(DEV10G_MAC_ENA_CFG_RX_ENA, x)
+#define DEV10G_MAC_ENA_CFG_RX_ENA_GET(x)\
+       FIELD_GET(DEV10G_MAC_ENA_CFG_RX_ENA, x)
+
+#define DEV10G_MAC_ENA_CFG_TX_ENA                BIT(0)
+#define DEV10G_MAC_ENA_CFG_TX_ENA_SET(x)\
+       FIELD_PREP(DEV10G_MAC_ENA_CFG_TX_ENA, x)
+#define DEV10G_MAC_ENA_CFG_TX_ENA_GET(x)\
+       FIELD_GET(DEV10G_MAC_ENA_CFG_TX_ENA, x)
+
+/*      DEV10G:MAC_CFG_STATUS:MAC_MAXLEN_CFG */
+#define DEV10G_MAC_MAXLEN_CFG(t)  __REG(TARGET_DEV10G, t, 12, 0, 0, 1, 60, 8, 0, 1, 4)
+
+#define DEV10G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK    BIT(16)
+#define DEV10G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK_SET(x)\
+       FIELD_PREP(DEV10G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK, x)
+#define DEV10G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK_GET(x)\
+       FIELD_GET(DEV10G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK, x)
+
+#define DEV10G_MAC_MAXLEN_CFG_MAX_LEN            GENMASK(15, 0)
+#define DEV10G_MAC_MAXLEN_CFG_MAX_LEN_SET(x)\
+       FIELD_PREP(DEV10G_MAC_MAXLEN_CFG_MAX_LEN, x)
+#define DEV10G_MAC_MAXLEN_CFG_MAX_LEN_GET(x)\
+       FIELD_GET(DEV10G_MAC_MAXLEN_CFG_MAX_LEN, x)
+
+/*      DEV10G:MAC_CFG_STATUS:MAC_NUM_TAGS_CFG */
+#define DEV10G_MAC_NUM_TAGS_CFG(t) __REG(TARGET_DEV10G, t, 12, 0, 0, 1, 60, 12, 0, 1, 4)
+
+#define DEV10G_MAC_NUM_TAGS_CFG_NUM_TAGS         GENMASK(1, 0)
+#define DEV10G_MAC_NUM_TAGS_CFG_NUM_TAGS_SET(x)\
+       FIELD_PREP(DEV10G_MAC_NUM_TAGS_CFG_NUM_TAGS, x)
+#define DEV10G_MAC_NUM_TAGS_CFG_NUM_TAGS_GET(x)\
+       FIELD_GET(DEV10G_MAC_NUM_TAGS_CFG_NUM_TAGS, x)
+
+/*      DEV10G:MAC_CFG_STATUS:MAC_TAGS_CFG */
+#define DEV10G_MAC_TAGS_CFG(t, r) __REG(TARGET_DEV10G, t, 12, 0, 0, 1, 60, 16, r, 3, 4)
+
+#define DEV10G_MAC_TAGS_CFG_TAG_ID               GENMASK(31, 16)
+#define DEV10G_MAC_TAGS_CFG_TAG_ID_SET(x)\
+       FIELD_PREP(DEV10G_MAC_TAGS_CFG_TAG_ID, x)
+#define DEV10G_MAC_TAGS_CFG_TAG_ID_GET(x)\
+       FIELD_GET(DEV10G_MAC_TAGS_CFG_TAG_ID, x)
+
+#define DEV10G_MAC_TAGS_CFG_TAG_ENA              BIT(4)
+#define DEV10G_MAC_TAGS_CFG_TAG_ENA_SET(x)\
+       FIELD_PREP(DEV10G_MAC_TAGS_CFG_TAG_ENA, x)
+#define DEV10G_MAC_TAGS_CFG_TAG_ENA_GET(x)\
+       FIELD_GET(DEV10G_MAC_TAGS_CFG_TAG_ENA, x)
+
+/*      DEV10G:MAC_CFG_STATUS:MAC_ADV_CHK_CFG */
+#define DEV10G_MAC_ADV_CHK_CFG(t) __REG(TARGET_DEV10G, t, 12, 0, 0, 1, 60, 28, 0, 1, 4)
+
+#define DEV10G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA   BIT(24)
+#define DEV10G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA_SET(x)\
+       FIELD_PREP(DEV10G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA, x)
+#define DEV10G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA_GET(x)\
+       FIELD_GET(DEV10G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA, x)
+
+#define DEV10G_MAC_ADV_CHK_CFG_EXT_SOP_CHK_ENA   BIT(20)
+#define DEV10G_MAC_ADV_CHK_CFG_EXT_SOP_CHK_ENA_SET(x)\
+       FIELD_PREP(DEV10G_MAC_ADV_CHK_CFG_EXT_SOP_CHK_ENA, x)
+#define DEV10G_MAC_ADV_CHK_CFG_EXT_SOP_CHK_ENA_GET(x)\
+       FIELD_GET(DEV10G_MAC_ADV_CHK_CFG_EXT_SOP_CHK_ENA, x)
+
+#define DEV10G_MAC_ADV_CHK_CFG_SFD_CHK_ENA       BIT(16)
+#define DEV10G_MAC_ADV_CHK_CFG_SFD_CHK_ENA_SET(x)\
+       FIELD_PREP(DEV10G_MAC_ADV_CHK_CFG_SFD_CHK_ENA, x)
+#define DEV10G_MAC_ADV_CHK_CFG_SFD_CHK_ENA_GET(x)\
+       FIELD_GET(DEV10G_MAC_ADV_CHK_CFG_SFD_CHK_ENA, x)
+
+#define DEV10G_MAC_ADV_CHK_CFG_PRM_SHK_CHK_DIS   BIT(12)
+#define DEV10G_MAC_ADV_CHK_CFG_PRM_SHK_CHK_DIS_SET(x)\
+       FIELD_PREP(DEV10G_MAC_ADV_CHK_CFG_PRM_SHK_CHK_DIS, x)
+#define DEV10G_MAC_ADV_CHK_CFG_PRM_SHK_CHK_DIS_GET(x)\
+       FIELD_GET(DEV10G_MAC_ADV_CHK_CFG_PRM_SHK_CHK_DIS, x)
+
+#define DEV10G_MAC_ADV_CHK_CFG_PRM_CHK_ENA       BIT(8)
+#define DEV10G_MAC_ADV_CHK_CFG_PRM_CHK_ENA_SET(x)\
+       FIELD_PREP(DEV10G_MAC_ADV_CHK_CFG_PRM_CHK_ENA, x)
+#define DEV10G_MAC_ADV_CHK_CFG_PRM_CHK_ENA_GET(x)\
+       FIELD_GET(DEV10G_MAC_ADV_CHK_CFG_PRM_CHK_ENA, x)
+
+#define DEV10G_MAC_ADV_CHK_CFG_OOR_ERR_ENA       BIT(4)
+#define DEV10G_MAC_ADV_CHK_CFG_OOR_ERR_ENA_SET(x)\
+       FIELD_PREP(DEV10G_MAC_ADV_CHK_CFG_OOR_ERR_ENA, x)
+#define DEV10G_MAC_ADV_CHK_CFG_OOR_ERR_ENA_GET(x)\
+       FIELD_GET(DEV10G_MAC_ADV_CHK_CFG_OOR_ERR_ENA, x)
+
+#define DEV10G_MAC_ADV_CHK_CFG_INR_ERR_ENA       BIT(0)
+#define DEV10G_MAC_ADV_CHK_CFG_INR_ERR_ENA_SET(x)\
+       FIELD_PREP(DEV10G_MAC_ADV_CHK_CFG_INR_ERR_ENA, x)
+#define DEV10G_MAC_ADV_CHK_CFG_INR_ERR_ENA_GET(x)\
+       FIELD_GET(DEV10G_MAC_ADV_CHK_CFG_INR_ERR_ENA, x)
+
+/*      DEV10G:MAC_CFG_STATUS:MAC_TX_MONITOR_STICKY */
+#define DEV10G_MAC_TX_MONITOR_STICKY(t) __REG(TARGET_DEV10G, t, 12, 0, 0, 1, 60, 48, 0, 1, 4)
+
+#define DEV10G_MAC_TX_MONITOR_STICKY_LOCAL_ERR_STATE_STICKY BIT(4)
+#define DEV10G_MAC_TX_MONITOR_STICKY_LOCAL_ERR_STATE_STICKY_SET(x)\
+       FIELD_PREP(DEV10G_MAC_TX_MONITOR_STICKY_LOCAL_ERR_STATE_STICKY, x)
+#define DEV10G_MAC_TX_MONITOR_STICKY_LOCAL_ERR_STATE_STICKY_GET(x)\
+       FIELD_GET(DEV10G_MAC_TX_MONITOR_STICKY_LOCAL_ERR_STATE_STICKY, x)
+
+#define DEV10G_MAC_TX_MONITOR_STICKY_REMOTE_ERR_STATE_STICKY BIT(3)
+#define DEV10G_MAC_TX_MONITOR_STICKY_REMOTE_ERR_STATE_STICKY_SET(x)\
+       FIELD_PREP(DEV10G_MAC_TX_MONITOR_STICKY_REMOTE_ERR_STATE_STICKY, x)
+#define DEV10G_MAC_TX_MONITOR_STICKY_REMOTE_ERR_STATE_STICKY_GET(x)\
+       FIELD_GET(DEV10G_MAC_TX_MONITOR_STICKY_REMOTE_ERR_STATE_STICKY, x)
+
+#define DEV10G_MAC_TX_MONITOR_STICKY_LINK_INTERRUPTION_STATE_STICKY BIT(2)
+#define DEV10G_MAC_TX_MONITOR_STICKY_LINK_INTERRUPTION_STATE_STICKY_SET(x)\
+       FIELD_PREP(DEV10G_MAC_TX_MONITOR_STICKY_LINK_INTERRUPTION_STATE_STICKY, x)
+#define DEV10G_MAC_TX_MONITOR_STICKY_LINK_INTERRUPTION_STATE_STICKY_GET(x)\
+       FIELD_GET(DEV10G_MAC_TX_MONITOR_STICKY_LINK_INTERRUPTION_STATE_STICKY, x)
+
+#define DEV10G_MAC_TX_MONITOR_STICKY_IDLE_STATE_STICKY BIT(1)
+#define DEV10G_MAC_TX_MONITOR_STICKY_IDLE_STATE_STICKY_SET(x)\
+       FIELD_PREP(DEV10G_MAC_TX_MONITOR_STICKY_IDLE_STATE_STICKY, x)
+#define DEV10G_MAC_TX_MONITOR_STICKY_IDLE_STATE_STICKY_GET(x)\
+       FIELD_GET(DEV10G_MAC_TX_MONITOR_STICKY_IDLE_STATE_STICKY, x)
+
+#define DEV10G_MAC_TX_MONITOR_STICKY_DIS_STATE_STICKY BIT(0)
+#define DEV10G_MAC_TX_MONITOR_STICKY_DIS_STATE_STICKY_SET(x)\
+       FIELD_PREP(DEV10G_MAC_TX_MONITOR_STICKY_DIS_STATE_STICKY, x)
+#define DEV10G_MAC_TX_MONITOR_STICKY_DIS_STATE_STICKY_GET(x)\
+       FIELD_GET(DEV10G_MAC_TX_MONITOR_STICKY_DIS_STATE_STICKY, x)
+
+/*      DEV10G:DEV_CFG_STATUS:DEV_RST_CTRL */
+#define DEV10G_DEV_RST_CTRL(t)    __REG(TARGET_DEV10G, t, 12, 436, 0, 1, 52, 0, 0, 1, 4)
+
+#define DEV10G_DEV_RST_CTRL_PARDET_MODE_ENA      BIT(28)
+#define DEV10G_DEV_RST_CTRL_PARDET_MODE_ENA_SET(x)\
+       FIELD_PREP(DEV10G_DEV_RST_CTRL_PARDET_MODE_ENA, x)
+#define DEV10G_DEV_RST_CTRL_PARDET_MODE_ENA_GET(x)\
+       FIELD_GET(DEV10G_DEV_RST_CTRL_PARDET_MODE_ENA, x)
+
+#define DEV10G_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS BIT(27)
+#define DEV10G_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS_SET(x)\
+       FIELD_PREP(DEV10G_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS, x)
+#define DEV10G_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS_GET(x)\
+       FIELD_GET(DEV10G_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS, x)
+
+#define DEV10G_DEV_RST_CTRL_MUXED_USXGMII_NETWORK_PORTS GENMASK(26, 25)
+#define DEV10G_DEV_RST_CTRL_MUXED_USXGMII_NETWORK_PORTS_SET(x)\
+       FIELD_PREP(DEV10G_DEV_RST_CTRL_MUXED_USXGMII_NETWORK_PORTS, x)
+#define DEV10G_DEV_RST_CTRL_MUXED_USXGMII_NETWORK_PORTS_GET(x)\
+       FIELD_GET(DEV10G_DEV_RST_CTRL_MUXED_USXGMII_NETWORK_PORTS, x)
+
+#define DEV10G_DEV_RST_CTRL_SERDES_SPEED_SEL     GENMASK(24, 23)
+#define DEV10G_DEV_RST_CTRL_SERDES_SPEED_SEL_SET(x)\
+       FIELD_PREP(DEV10G_DEV_RST_CTRL_SERDES_SPEED_SEL, x)
+#define DEV10G_DEV_RST_CTRL_SERDES_SPEED_SEL_GET(x)\
+       FIELD_GET(DEV10G_DEV_RST_CTRL_SERDES_SPEED_SEL, x)
+
+#define DEV10G_DEV_RST_CTRL_SPEED_SEL            GENMASK(22, 20)
+#define DEV10G_DEV_RST_CTRL_SPEED_SEL_SET(x)\
+       FIELD_PREP(DEV10G_DEV_RST_CTRL_SPEED_SEL, x)
+#define DEV10G_DEV_RST_CTRL_SPEED_SEL_GET(x)\
+       FIELD_GET(DEV10G_DEV_RST_CTRL_SPEED_SEL, x)
+
+#define DEV10G_DEV_RST_CTRL_PCS_TX_RST           BIT(12)
+#define DEV10G_DEV_RST_CTRL_PCS_TX_RST_SET(x)\
+       FIELD_PREP(DEV10G_DEV_RST_CTRL_PCS_TX_RST, x)
+#define DEV10G_DEV_RST_CTRL_PCS_TX_RST_GET(x)\
+       FIELD_GET(DEV10G_DEV_RST_CTRL_PCS_TX_RST, x)
+
+#define DEV10G_DEV_RST_CTRL_PCS_RX_RST           BIT(8)
+#define DEV10G_DEV_RST_CTRL_PCS_RX_RST_SET(x)\
+       FIELD_PREP(DEV10G_DEV_RST_CTRL_PCS_RX_RST, x)
+#define DEV10G_DEV_RST_CTRL_PCS_RX_RST_GET(x)\
+       FIELD_GET(DEV10G_DEV_RST_CTRL_PCS_RX_RST, x)
+
+#define DEV10G_DEV_RST_CTRL_MAC_TX_RST           BIT(4)
+#define DEV10G_DEV_RST_CTRL_MAC_TX_RST_SET(x)\
+       FIELD_PREP(DEV10G_DEV_RST_CTRL_MAC_TX_RST, x)
+#define DEV10G_DEV_RST_CTRL_MAC_TX_RST_GET(x)\
+       FIELD_GET(DEV10G_DEV_RST_CTRL_MAC_TX_RST, x)
+
+#define DEV10G_DEV_RST_CTRL_MAC_RX_RST           BIT(0)
+#define DEV10G_DEV_RST_CTRL_MAC_RX_RST_SET(x)\
+       FIELD_PREP(DEV10G_DEV_RST_CTRL_MAC_RX_RST, x)
+#define DEV10G_DEV_RST_CTRL_MAC_RX_RST_GET(x)\
+       FIELD_GET(DEV10G_DEV_RST_CTRL_MAC_RX_RST, x)
+
+/*      DEV10G:PCS25G_CFG_STATUS:PCS25G_CFG */
+#define DEV10G_PCS25G_CFG(t)      __REG(TARGET_DEV10G, t, 12, 488, 0, 1, 32, 0, 0, 1, 4)
+
+#define DEV10G_PCS25G_CFG_PCS25G_ENA             BIT(0)
+#define DEV10G_PCS25G_CFG_PCS25G_ENA_SET(x)\
+       FIELD_PREP(DEV10G_PCS25G_CFG_PCS25G_ENA, x)
+#define DEV10G_PCS25G_CFG_PCS25G_ENA_GET(x)\
+       FIELD_GET(DEV10G_PCS25G_CFG_PCS25G_ENA, x)
+
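+/* Editor's note: the DEV10G: paths in the comments below are not a
+ * copy/paste error. The DEV25G and DEV5G targets reuse the DEV10G
+ * register layout (and DEV2G5 further down likewise reuses DEV1G's),
+ * so only the target id, the instance count (8 or 13 instead of 12)
+ * and the addresses differ between the otherwise identical macro
+ * blocks.
+ */
+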
+/*      DEV10G:MAC_CFG_STATUS:MAC_ENA_CFG */
+#define DEV25G_MAC_ENA_CFG(t)     __REG(TARGET_DEV25G, t, 8, 0, 0, 1, 60, 0, 0, 1, 4)
+
+#define DEV25G_MAC_ENA_CFG_RX_ENA                BIT(4)
+#define DEV25G_MAC_ENA_CFG_RX_ENA_SET(x)\
+       FIELD_PREP(DEV25G_MAC_ENA_CFG_RX_ENA, x)
+#define DEV25G_MAC_ENA_CFG_RX_ENA_GET(x)\
+       FIELD_GET(DEV25G_MAC_ENA_CFG_RX_ENA, x)
+
+#define DEV25G_MAC_ENA_CFG_TX_ENA                BIT(0)
+#define DEV25G_MAC_ENA_CFG_TX_ENA_SET(x)\
+       FIELD_PREP(DEV25G_MAC_ENA_CFG_TX_ENA, x)
+#define DEV25G_MAC_ENA_CFG_TX_ENA_GET(x)\
+       FIELD_GET(DEV25G_MAC_ENA_CFG_TX_ENA, x)
+
+/*      DEV10G:MAC_CFG_STATUS:MAC_MAXLEN_CFG */
+#define DEV25G_MAC_MAXLEN_CFG(t)  __REG(TARGET_DEV25G, t, 8, 0, 0, 1, 60, 8, 0, 1, 4)
+
+#define DEV25G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK    BIT(16)
+#define DEV25G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK_SET(x)\
+       FIELD_PREP(DEV25G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK, x)
+#define DEV25G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK_GET(x)\
+       FIELD_GET(DEV25G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK, x)
+
+#define DEV25G_MAC_MAXLEN_CFG_MAX_LEN            GENMASK(15, 0)
+#define DEV25G_MAC_MAXLEN_CFG_MAX_LEN_SET(x)\
+       FIELD_PREP(DEV25G_MAC_MAXLEN_CFG_MAX_LEN, x)
+#define DEV25G_MAC_MAXLEN_CFG_MAX_LEN_GET(x)\
+       FIELD_GET(DEV25G_MAC_MAXLEN_CFG_MAX_LEN, x)
+
+/*      DEV10G:MAC_CFG_STATUS:MAC_ADV_CHK_CFG */
+#define DEV25G_MAC_ADV_CHK_CFG(t) __REG(TARGET_DEV25G, t, 8, 0, 0, 1, 60, 28, 0, 1, 4)
+
+#define DEV25G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA   BIT(24)
+#define DEV25G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA_SET(x)\
+       FIELD_PREP(DEV25G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA, x)
+#define DEV25G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA_GET(x)\
+       FIELD_GET(DEV25G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA, x)
+
+#define DEV25G_MAC_ADV_CHK_CFG_EXT_SOP_CHK_ENA   BIT(20)
+#define DEV25G_MAC_ADV_CHK_CFG_EXT_SOP_CHK_ENA_SET(x)\
+       FIELD_PREP(DEV25G_MAC_ADV_CHK_CFG_EXT_SOP_CHK_ENA, x)
+#define DEV25G_MAC_ADV_CHK_CFG_EXT_SOP_CHK_ENA_GET(x)\
+       FIELD_GET(DEV25G_MAC_ADV_CHK_CFG_EXT_SOP_CHK_ENA, x)
+
+#define DEV25G_MAC_ADV_CHK_CFG_SFD_CHK_ENA       BIT(16)
+#define DEV25G_MAC_ADV_CHK_CFG_SFD_CHK_ENA_SET(x)\
+       FIELD_PREP(DEV25G_MAC_ADV_CHK_CFG_SFD_CHK_ENA, x)
+#define DEV25G_MAC_ADV_CHK_CFG_SFD_CHK_ENA_GET(x)\
+       FIELD_GET(DEV25G_MAC_ADV_CHK_CFG_SFD_CHK_ENA, x)
+
+#define DEV25G_MAC_ADV_CHK_CFG_PRM_SHK_CHK_DIS   BIT(12)
+#define DEV25G_MAC_ADV_CHK_CFG_PRM_SHK_CHK_DIS_SET(x)\
+       FIELD_PREP(DEV25G_MAC_ADV_CHK_CFG_PRM_SHK_CHK_DIS, x)
+#define DEV25G_MAC_ADV_CHK_CFG_PRM_SHK_CHK_DIS_GET(x)\
+       FIELD_GET(DEV25G_MAC_ADV_CHK_CFG_PRM_SHK_CHK_DIS, x)
+
+#define DEV25G_MAC_ADV_CHK_CFG_PRM_CHK_ENA       BIT(8)
+#define DEV25G_MAC_ADV_CHK_CFG_PRM_CHK_ENA_SET(x)\
+       FIELD_PREP(DEV25G_MAC_ADV_CHK_CFG_PRM_CHK_ENA, x)
+#define DEV25G_MAC_ADV_CHK_CFG_PRM_CHK_ENA_GET(x)\
+       FIELD_GET(DEV25G_MAC_ADV_CHK_CFG_PRM_CHK_ENA, x)
+
+#define DEV25G_MAC_ADV_CHK_CFG_OOR_ERR_ENA       BIT(4)
+#define DEV25G_MAC_ADV_CHK_CFG_OOR_ERR_ENA_SET(x)\
+       FIELD_PREP(DEV25G_MAC_ADV_CHK_CFG_OOR_ERR_ENA, x)
+#define DEV25G_MAC_ADV_CHK_CFG_OOR_ERR_ENA_GET(x)\
+       FIELD_GET(DEV25G_MAC_ADV_CHK_CFG_OOR_ERR_ENA, x)
+
+#define DEV25G_MAC_ADV_CHK_CFG_INR_ERR_ENA       BIT(0)
+#define DEV25G_MAC_ADV_CHK_CFG_INR_ERR_ENA_SET(x)\
+       FIELD_PREP(DEV25G_MAC_ADV_CHK_CFG_INR_ERR_ENA, x)
+#define DEV25G_MAC_ADV_CHK_CFG_INR_ERR_ENA_GET(x)\
+       FIELD_GET(DEV25G_MAC_ADV_CHK_CFG_INR_ERR_ENA, x)
+
+/*      DEV10G:DEV_CFG_STATUS:DEV_RST_CTRL */
+#define DEV25G_DEV_RST_CTRL(t)    __REG(TARGET_DEV25G, t, 8, 436, 0, 1, 52, 0, 0, 1, 4)
+
+#define DEV25G_DEV_RST_CTRL_PARDET_MODE_ENA      BIT(28)
+#define DEV25G_DEV_RST_CTRL_PARDET_MODE_ENA_SET(x)\
+       FIELD_PREP(DEV25G_DEV_RST_CTRL_PARDET_MODE_ENA, x)
+#define DEV25G_DEV_RST_CTRL_PARDET_MODE_ENA_GET(x)\
+       FIELD_GET(DEV25G_DEV_RST_CTRL_PARDET_MODE_ENA, x)
+
+#define DEV25G_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS BIT(27)
+#define DEV25G_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS_SET(x)\
+       FIELD_PREP(DEV25G_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS, x)
+#define DEV25G_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS_GET(x)\
+       FIELD_GET(DEV25G_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS, x)
+
+#define DEV25G_DEV_RST_CTRL_MUXED_USXGMII_NETWORK_PORTS GENMASK(26, 25)
+#define DEV25G_DEV_RST_CTRL_MUXED_USXGMII_NETWORK_PORTS_SET(x)\
+       FIELD_PREP(DEV25G_DEV_RST_CTRL_MUXED_USXGMII_NETWORK_PORTS, x)
+#define DEV25G_DEV_RST_CTRL_MUXED_USXGMII_NETWORK_PORTS_GET(x)\
+       FIELD_GET(DEV25G_DEV_RST_CTRL_MUXED_USXGMII_NETWORK_PORTS, x)
+
+#define DEV25G_DEV_RST_CTRL_SERDES_SPEED_SEL     GENMASK(24, 23)
+#define DEV25G_DEV_RST_CTRL_SERDES_SPEED_SEL_SET(x)\
+       FIELD_PREP(DEV25G_DEV_RST_CTRL_SERDES_SPEED_SEL, x)
+#define DEV25G_DEV_RST_CTRL_SERDES_SPEED_SEL_GET(x)\
+       FIELD_GET(DEV25G_DEV_RST_CTRL_SERDES_SPEED_SEL, x)
+
+#define DEV25G_DEV_RST_CTRL_SPEED_SEL            GENMASK(22, 20)
+#define DEV25G_DEV_RST_CTRL_SPEED_SEL_SET(x)\
+       FIELD_PREP(DEV25G_DEV_RST_CTRL_SPEED_SEL, x)
+#define DEV25G_DEV_RST_CTRL_SPEED_SEL_GET(x)\
+       FIELD_GET(DEV25G_DEV_RST_CTRL_SPEED_SEL, x)
+
+#define DEV25G_DEV_RST_CTRL_PCS_TX_RST           BIT(12)
+#define DEV25G_DEV_RST_CTRL_PCS_TX_RST_SET(x)\
+       FIELD_PREP(DEV25G_DEV_RST_CTRL_PCS_TX_RST, x)
+#define DEV25G_DEV_RST_CTRL_PCS_TX_RST_GET(x)\
+       FIELD_GET(DEV25G_DEV_RST_CTRL_PCS_TX_RST, x)
+
+#define DEV25G_DEV_RST_CTRL_PCS_RX_RST           BIT(8)
+#define DEV25G_DEV_RST_CTRL_PCS_RX_RST_SET(x)\
+       FIELD_PREP(DEV25G_DEV_RST_CTRL_PCS_RX_RST, x)
+#define DEV25G_DEV_RST_CTRL_PCS_RX_RST_GET(x)\
+       FIELD_GET(DEV25G_DEV_RST_CTRL_PCS_RX_RST, x)
+
+#define DEV25G_DEV_RST_CTRL_MAC_TX_RST           BIT(4)
+#define DEV25G_DEV_RST_CTRL_MAC_TX_RST_SET(x)\
+       FIELD_PREP(DEV25G_DEV_RST_CTRL_MAC_TX_RST, x)
+#define DEV25G_DEV_RST_CTRL_MAC_TX_RST_GET(x)\
+       FIELD_GET(DEV25G_DEV_RST_CTRL_MAC_TX_RST, x)
+
+#define DEV25G_DEV_RST_CTRL_MAC_RX_RST           BIT(0)
+#define DEV25G_DEV_RST_CTRL_MAC_RX_RST_SET(x)\
+       FIELD_PREP(DEV25G_DEV_RST_CTRL_MAC_RX_RST, x)
+#define DEV25G_DEV_RST_CTRL_MAC_RX_RST_GET(x)\
+       FIELD_GET(DEV25G_DEV_RST_CTRL_MAC_RX_RST, x)
+
+/*      DEV10G:PCS25G_CFG_STATUS:PCS25G_CFG */
+#define DEV25G_PCS25G_CFG(t)      __REG(TARGET_DEV25G, t, 8, 488, 0, 1, 32, 0, 0, 1, 4)
+
+#define DEV25G_PCS25G_CFG_PCS25G_ENA             BIT(0)
+#define DEV25G_PCS25G_CFG_PCS25G_ENA_SET(x)\
+       FIELD_PREP(DEV25G_PCS25G_CFG_PCS25G_ENA, x)
+#define DEV25G_PCS25G_CFG_PCS25G_ENA_GET(x)\
+       FIELD_GET(DEV25G_PCS25G_CFG_PCS25G_ENA, x)
+
+/*      DEV10G:PCS25G_CFG_STATUS:PCS25G_SD_CFG */
+#define DEV25G_PCS25G_SD_CFG(t)   __REG(TARGET_DEV25G, t, 8, 488, 0, 1, 32, 4, 0, 1, 4)
+
+#define DEV25G_PCS25G_SD_CFG_SD_SEL              BIT(8)
+#define DEV25G_PCS25G_SD_CFG_SD_SEL_SET(x)\
+       FIELD_PREP(DEV25G_PCS25G_SD_CFG_SD_SEL, x)
+#define DEV25G_PCS25G_SD_CFG_SD_SEL_GET(x)\
+       FIELD_GET(DEV25G_PCS25G_SD_CFG_SD_SEL, x)
+
+#define DEV25G_PCS25G_SD_CFG_SD_POL              BIT(4)
+#define DEV25G_PCS25G_SD_CFG_SD_POL_SET(x)\
+       FIELD_PREP(DEV25G_PCS25G_SD_CFG_SD_POL, x)
+#define DEV25G_PCS25G_SD_CFG_SD_POL_GET(x)\
+       FIELD_GET(DEV25G_PCS25G_SD_CFG_SD_POL, x)
+
+#define DEV25G_PCS25G_SD_CFG_SD_ENA              BIT(0)
+#define DEV25G_PCS25G_SD_CFG_SD_ENA_SET(x)\
+       FIELD_PREP(DEV25G_PCS25G_SD_CFG_SD_ENA, x)
+#define DEV25G_PCS25G_SD_CFG_SD_ENA_GET(x)\
+       FIELD_GET(DEV25G_PCS25G_SD_CFG_SD_ENA, x)
+
+/*      DEV1G:DEV_CFG_STATUS:DEV_RST_CTRL */
+#define DEV2G5_DEV_RST_CTRL(t)    __REG(TARGET_DEV2G5, t, 65, 0, 0, 1, 36, 0, 0, 1, 4)
+
+#define DEV2G5_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS BIT(23)
+#define DEV2G5_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS_SET(x)\
+       FIELD_PREP(DEV2G5_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS, x)
+#define DEV2G5_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS_GET(x)\
+       FIELD_GET(DEV2G5_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS, x)
+
+#define DEV2G5_DEV_RST_CTRL_SPEED_SEL            GENMASK(22, 20)
+#define DEV2G5_DEV_RST_CTRL_SPEED_SEL_SET(x)\
+       FIELD_PREP(DEV2G5_DEV_RST_CTRL_SPEED_SEL, x)
+#define DEV2G5_DEV_RST_CTRL_SPEED_SEL_GET(x)\
+       FIELD_GET(DEV2G5_DEV_RST_CTRL_SPEED_SEL, x)
+
+#define DEV2G5_DEV_RST_CTRL_USX_PCS_TX_RST       BIT(17)
+#define DEV2G5_DEV_RST_CTRL_USX_PCS_TX_RST_SET(x)\
+       FIELD_PREP(DEV2G5_DEV_RST_CTRL_USX_PCS_TX_RST, x)
+#define DEV2G5_DEV_RST_CTRL_USX_PCS_TX_RST_GET(x)\
+       FIELD_GET(DEV2G5_DEV_RST_CTRL_USX_PCS_TX_RST, x)
+
+#define DEV2G5_DEV_RST_CTRL_USX_PCS_RX_RST       BIT(16)
+#define DEV2G5_DEV_RST_CTRL_USX_PCS_RX_RST_SET(x)\
+       FIELD_PREP(DEV2G5_DEV_RST_CTRL_USX_PCS_RX_RST, x)
+#define DEV2G5_DEV_RST_CTRL_USX_PCS_RX_RST_GET(x)\
+       FIELD_GET(DEV2G5_DEV_RST_CTRL_USX_PCS_RX_RST, x)
+
+#define DEV2G5_DEV_RST_CTRL_PCS_TX_RST           BIT(12)
+#define DEV2G5_DEV_RST_CTRL_PCS_TX_RST_SET(x)\
+       FIELD_PREP(DEV2G5_DEV_RST_CTRL_PCS_TX_RST, x)
+#define DEV2G5_DEV_RST_CTRL_PCS_TX_RST_GET(x)\
+       FIELD_GET(DEV2G5_DEV_RST_CTRL_PCS_TX_RST, x)
+
+#define DEV2G5_DEV_RST_CTRL_PCS_RX_RST           BIT(8)
+#define DEV2G5_DEV_RST_CTRL_PCS_RX_RST_SET(x)\
+       FIELD_PREP(DEV2G5_DEV_RST_CTRL_PCS_RX_RST, x)
+#define DEV2G5_DEV_RST_CTRL_PCS_RX_RST_GET(x)\
+       FIELD_GET(DEV2G5_DEV_RST_CTRL_PCS_RX_RST, x)
+
+#define DEV2G5_DEV_RST_CTRL_MAC_TX_RST           BIT(4)
+#define DEV2G5_DEV_RST_CTRL_MAC_TX_RST_SET(x)\
+       FIELD_PREP(DEV2G5_DEV_RST_CTRL_MAC_TX_RST, x)
+#define DEV2G5_DEV_RST_CTRL_MAC_TX_RST_GET(x)\
+       FIELD_GET(DEV2G5_DEV_RST_CTRL_MAC_TX_RST, x)
+
+#define DEV2G5_DEV_RST_CTRL_MAC_RX_RST           BIT(0)
+#define DEV2G5_DEV_RST_CTRL_MAC_RX_RST_SET(x)\
+       FIELD_PREP(DEV2G5_DEV_RST_CTRL_MAC_RX_RST, x)
+#define DEV2G5_DEV_RST_CTRL_MAC_RX_RST_GET(x)\
+       FIELD_GET(DEV2G5_DEV_RST_CTRL_MAC_RX_RST, x)
+
+/*      DEV1G:MAC_CFG_STATUS:MAC_ENA_CFG */
+#define DEV2G5_MAC_ENA_CFG(t)     __REG(TARGET_DEV2G5, t, 65, 52, 0, 1, 36, 0, 0, 1, 4)
+
+#define DEV2G5_MAC_ENA_CFG_RX_ENA                BIT(4)
+#define DEV2G5_MAC_ENA_CFG_RX_ENA_SET(x)\
+       FIELD_PREP(DEV2G5_MAC_ENA_CFG_RX_ENA, x)
+#define DEV2G5_MAC_ENA_CFG_RX_ENA_GET(x)\
+       FIELD_GET(DEV2G5_MAC_ENA_CFG_RX_ENA, x)
+
+#define DEV2G5_MAC_ENA_CFG_TX_ENA                BIT(0)
+#define DEV2G5_MAC_ENA_CFG_TX_ENA_SET(x)\
+       FIELD_PREP(DEV2G5_MAC_ENA_CFG_TX_ENA, x)
+#define DEV2G5_MAC_ENA_CFG_TX_ENA_GET(x)\
+       FIELD_GET(DEV2G5_MAC_ENA_CFG_TX_ENA, x)
+
+/*      DEV1G:MAC_CFG_STATUS:MAC_MODE_CFG */
+#define DEV2G5_MAC_MODE_CFG(t)    __REG(TARGET_DEV2G5, t, 65, 52, 0, 1, 36, 4, 0, 1, 4)
+
+#define DEV2G5_MAC_MODE_CFG_FC_WORD_SYNC_ENA     BIT(8)
+#define DEV2G5_MAC_MODE_CFG_FC_WORD_SYNC_ENA_SET(x)\
+       FIELD_PREP(DEV2G5_MAC_MODE_CFG_FC_WORD_SYNC_ENA, x)
+#define DEV2G5_MAC_MODE_CFG_FC_WORD_SYNC_ENA_GET(x)\
+       FIELD_GET(DEV2G5_MAC_MODE_CFG_FC_WORD_SYNC_ENA, x)
+
+#define DEV2G5_MAC_MODE_CFG_GIGA_MODE_ENA        BIT(4)
+#define DEV2G5_MAC_MODE_CFG_GIGA_MODE_ENA_SET(x)\
+       FIELD_PREP(DEV2G5_MAC_MODE_CFG_GIGA_MODE_ENA, x)
+#define DEV2G5_MAC_MODE_CFG_GIGA_MODE_ENA_GET(x)\
+       FIELD_GET(DEV2G5_MAC_MODE_CFG_GIGA_MODE_ENA, x)
+
+#define DEV2G5_MAC_MODE_CFG_FDX_ENA              BIT(0)
+#define DEV2G5_MAC_MODE_CFG_FDX_ENA_SET(x)\
+       FIELD_PREP(DEV2G5_MAC_MODE_CFG_FDX_ENA, x)
+#define DEV2G5_MAC_MODE_CFG_FDX_ENA_GET(x)\
+       FIELD_GET(DEV2G5_MAC_MODE_CFG_FDX_ENA, x)
+
+/*      DEV1G:MAC_CFG_STATUS:MAC_MAXLEN_CFG */
+#define DEV2G5_MAC_MAXLEN_CFG(t)  __REG(TARGET_DEV2G5, t, 65, 52, 0, 1, 36, 8, 0, 1, 4)
+
+#define DEV2G5_MAC_MAXLEN_CFG_MAX_LEN            GENMASK(15, 0)
+#define DEV2G5_MAC_MAXLEN_CFG_MAX_LEN_SET(x)\
+       FIELD_PREP(DEV2G5_MAC_MAXLEN_CFG_MAX_LEN, x)
+#define DEV2G5_MAC_MAXLEN_CFG_MAX_LEN_GET(x)\
+       FIELD_GET(DEV2G5_MAC_MAXLEN_CFG_MAX_LEN, x)
+
+/*      DEV1G:MAC_CFG_STATUS:MAC_TAGS_CFG */
+#define DEV2G5_MAC_TAGS_CFG(t)    __REG(TARGET_DEV2G5, t, 65, 52, 0, 1, 36, 12, 0, 1, 4)
+
+#define DEV2G5_MAC_TAGS_CFG_TAG_ID               GENMASK(31, 16)
+#define DEV2G5_MAC_TAGS_CFG_TAG_ID_SET(x)\
+       FIELD_PREP(DEV2G5_MAC_TAGS_CFG_TAG_ID, x)
+#define DEV2G5_MAC_TAGS_CFG_TAG_ID_GET(x)\
+       FIELD_GET(DEV2G5_MAC_TAGS_CFG_TAG_ID, x)
+
+#define DEV2G5_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA     BIT(3)
+#define DEV2G5_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA_SET(x)\
+       FIELD_PREP(DEV2G5_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA, x)
+#define DEV2G5_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA_GET(x)\
+       FIELD_GET(DEV2G5_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA, x)
+
+#define DEV2G5_MAC_TAGS_CFG_PB_ENA               GENMASK(2, 1)
+#define DEV2G5_MAC_TAGS_CFG_PB_ENA_SET(x)\
+       FIELD_PREP(DEV2G5_MAC_TAGS_CFG_PB_ENA, x)
+#define DEV2G5_MAC_TAGS_CFG_PB_ENA_GET(x)\
+       FIELD_GET(DEV2G5_MAC_TAGS_CFG_PB_ENA, x)
+
+#define DEV2G5_MAC_TAGS_CFG_VLAN_AWR_ENA         BIT(0)
+#define DEV2G5_MAC_TAGS_CFG_VLAN_AWR_ENA_SET(x)\
+       FIELD_PREP(DEV2G5_MAC_TAGS_CFG_VLAN_AWR_ENA, x)
+#define DEV2G5_MAC_TAGS_CFG_VLAN_AWR_ENA_GET(x)\
+       FIELD_GET(DEV2G5_MAC_TAGS_CFG_VLAN_AWR_ENA, x)
+
+/*      DEV1G:MAC_CFG_STATUS:MAC_TAGS_CFG2 */
+#define DEV2G5_MAC_TAGS_CFG2(t)   __REG(TARGET_DEV2G5, t, 65, 52, 0, 1, 36, 16, 0, 1, 4)
+
+#define DEV2G5_MAC_TAGS_CFG2_TAG_ID3             GENMASK(31, 16)
+#define DEV2G5_MAC_TAGS_CFG2_TAG_ID3_SET(x)\
+       FIELD_PREP(DEV2G5_MAC_TAGS_CFG2_TAG_ID3, x)
+#define DEV2G5_MAC_TAGS_CFG2_TAG_ID3_GET(x)\
+       FIELD_GET(DEV2G5_MAC_TAGS_CFG2_TAG_ID3, x)
+
+#define DEV2G5_MAC_TAGS_CFG2_TAG_ID2             GENMASK(15, 0)
+#define DEV2G5_MAC_TAGS_CFG2_TAG_ID2_SET(x)\
+       FIELD_PREP(DEV2G5_MAC_TAGS_CFG2_TAG_ID2, x)
+#define DEV2G5_MAC_TAGS_CFG2_TAG_ID2_GET(x)\
+       FIELD_GET(DEV2G5_MAC_TAGS_CFG2_TAG_ID2, x)
+
+/*      DEV1G:MAC_CFG_STATUS:MAC_ADV_CHK_CFG */
+#define DEV2G5_MAC_ADV_CHK_CFG(t) __REG(TARGET_DEV2G5, t, 65, 52, 0, 1, 36, 20, 0, 1, 4)
+
+#define DEV2G5_MAC_ADV_CHK_CFG_LEN_DROP_ENA      BIT(0)
+#define DEV2G5_MAC_ADV_CHK_CFG_LEN_DROP_ENA_SET(x)\
+       FIELD_PREP(DEV2G5_MAC_ADV_CHK_CFG_LEN_DROP_ENA, x)
+#define DEV2G5_MAC_ADV_CHK_CFG_LEN_DROP_ENA_GET(x)\
+       FIELD_GET(DEV2G5_MAC_ADV_CHK_CFG_LEN_DROP_ENA, x)
+
+/*      DEV1G:MAC_CFG_STATUS:MAC_IFG_CFG */
+#define DEV2G5_MAC_IFG_CFG(t)     __REG(TARGET_DEV2G5, t, 65, 52, 0, 1, 36, 24, 0, 1, 4)
+
+#define DEV2G5_MAC_IFG_CFG_RESTORE_OLD_IPG_CHECK BIT(17)
+#define DEV2G5_MAC_IFG_CFG_RESTORE_OLD_IPG_CHECK_SET(x)\
+       FIELD_PREP(DEV2G5_MAC_IFG_CFG_RESTORE_OLD_IPG_CHECK, x)
+#define DEV2G5_MAC_IFG_CFG_RESTORE_OLD_IPG_CHECK_GET(x)\
+       FIELD_GET(DEV2G5_MAC_IFG_CFG_RESTORE_OLD_IPG_CHECK, x)
+
+#define DEV2G5_MAC_IFG_CFG_TX_IFG                GENMASK(12, 8)
+#define DEV2G5_MAC_IFG_CFG_TX_IFG_SET(x)\
+       FIELD_PREP(DEV2G5_MAC_IFG_CFG_TX_IFG, x)
+#define DEV2G5_MAC_IFG_CFG_TX_IFG_GET(x)\
+       FIELD_GET(DEV2G5_MAC_IFG_CFG_TX_IFG, x)
+
+#define DEV2G5_MAC_IFG_CFG_RX_IFG2               GENMASK(7, 4)
+#define DEV2G5_MAC_IFG_CFG_RX_IFG2_SET(x)\
+       FIELD_PREP(DEV2G5_MAC_IFG_CFG_RX_IFG2, x)
+#define DEV2G5_MAC_IFG_CFG_RX_IFG2_GET(x)\
+       FIELD_GET(DEV2G5_MAC_IFG_CFG_RX_IFG2, x)
+
+#define DEV2G5_MAC_IFG_CFG_RX_IFG1               GENMASK(3, 0)
+#define DEV2G5_MAC_IFG_CFG_RX_IFG1_SET(x)\
+       FIELD_PREP(DEV2G5_MAC_IFG_CFG_RX_IFG1, x)
+#define DEV2G5_MAC_IFG_CFG_RX_IFG1_GET(x)\
+       FIELD_GET(DEV2G5_MAC_IFG_CFG_RX_IFG1, x)
+
+/*      DEV1G:MAC_CFG_STATUS:MAC_HDX_CFG */
+#define DEV2G5_MAC_HDX_CFG(t)     __REG(TARGET_DEV2G5, t, 65, 52, 0, 1, 36, 28, 0, 1, 4)
+
+#define DEV2G5_MAC_HDX_CFG_BYPASS_COL_SYNC       BIT(26)
+#define DEV2G5_MAC_HDX_CFG_BYPASS_COL_SYNC_SET(x)\
+       FIELD_PREP(DEV2G5_MAC_HDX_CFG_BYPASS_COL_SYNC, x)
+#define DEV2G5_MAC_HDX_CFG_BYPASS_COL_SYNC_GET(x)\
+       FIELD_GET(DEV2G5_MAC_HDX_CFG_BYPASS_COL_SYNC, x)
+
+#define DEV2G5_MAC_HDX_CFG_SEED                  GENMASK(23, 16)
+#define DEV2G5_MAC_HDX_CFG_SEED_SET(x)\
+       FIELD_PREP(DEV2G5_MAC_HDX_CFG_SEED, x)
+#define DEV2G5_MAC_HDX_CFG_SEED_GET(x)\
+       FIELD_GET(DEV2G5_MAC_HDX_CFG_SEED, x)
+
+#define DEV2G5_MAC_HDX_CFG_SEED_LOAD             BIT(12)
+#define DEV2G5_MAC_HDX_CFG_SEED_LOAD_SET(x)\
+       FIELD_PREP(DEV2G5_MAC_HDX_CFG_SEED_LOAD, x)
+#define DEV2G5_MAC_HDX_CFG_SEED_LOAD_GET(x)\
+       FIELD_GET(DEV2G5_MAC_HDX_CFG_SEED_LOAD, x)
+
+#define DEV2G5_MAC_HDX_CFG_RETRY_AFTER_EXC_COL_ENA BIT(8)
+#define DEV2G5_MAC_HDX_CFG_RETRY_AFTER_EXC_COL_ENA_SET(x)\
+       FIELD_PREP(DEV2G5_MAC_HDX_CFG_RETRY_AFTER_EXC_COL_ENA, x)
+#define DEV2G5_MAC_HDX_CFG_RETRY_AFTER_EXC_COL_ENA_GET(x)\
+       FIELD_GET(DEV2G5_MAC_HDX_CFG_RETRY_AFTER_EXC_COL_ENA, x)
+
+#define DEV2G5_MAC_HDX_CFG_LATE_COL_POS          GENMASK(6, 0)
+#define DEV2G5_MAC_HDX_CFG_LATE_COL_POS_SET(x)\
+       FIELD_PREP(DEV2G5_MAC_HDX_CFG_LATE_COL_POS, x)
+#define DEV2G5_MAC_HDX_CFG_LATE_COL_POS_GET(x)\
+       FIELD_GET(DEV2G5_MAC_HDX_CFG_LATE_COL_POS, x)
+
+/*      DEV1G:PCS1G_CFG_STATUS:PCS1G_CFG */
+#define DEV2G5_PCS1G_CFG(t)       __REG(TARGET_DEV2G5, t, 65, 88, 0, 1, 68, 0, 0, 1, 4)
+
+#define DEV2G5_PCS1G_CFG_LINK_STATUS_TYPE        BIT(4)
+#define DEV2G5_PCS1G_CFG_LINK_STATUS_TYPE_SET(x)\
+       FIELD_PREP(DEV2G5_PCS1G_CFG_LINK_STATUS_TYPE, x)
+#define DEV2G5_PCS1G_CFG_LINK_STATUS_TYPE_GET(x)\
+       FIELD_GET(DEV2G5_PCS1G_CFG_LINK_STATUS_TYPE, x)
+
+#define DEV2G5_PCS1G_CFG_AN_LINK_CTRL_ENA        BIT(1)
+#define DEV2G5_PCS1G_CFG_AN_LINK_CTRL_ENA_SET(x)\
+       FIELD_PREP(DEV2G5_PCS1G_CFG_AN_LINK_CTRL_ENA, x)
+#define DEV2G5_PCS1G_CFG_AN_LINK_CTRL_ENA_GET(x)\
+       FIELD_GET(DEV2G5_PCS1G_CFG_AN_LINK_CTRL_ENA, x)
+
+#define DEV2G5_PCS1G_CFG_PCS_ENA                 BIT(0)
+#define DEV2G5_PCS1G_CFG_PCS_ENA_SET(x)\
+       FIELD_PREP(DEV2G5_PCS1G_CFG_PCS_ENA, x)
+#define DEV2G5_PCS1G_CFG_PCS_ENA_GET(x)\
+       FIELD_GET(DEV2G5_PCS1G_CFG_PCS_ENA, x)
+
+/*      DEV1G:PCS1G_CFG_STATUS:PCS1G_MODE_CFG */
+#define DEV2G5_PCS1G_MODE_CFG(t)  __REG(TARGET_DEV2G5, t, 65, 88, 0, 1, 68, 4, 0, 1, 4)
+
+#define DEV2G5_PCS1G_MODE_CFG_UNIDIR_MODE_ENA    BIT(4)
+#define DEV2G5_PCS1G_MODE_CFG_UNIDIR_MODE_ENA_SET(x)\
+       FIELD_PREP(DEV2G5_PCS1G_MODE_CFG_UNIDIR_MODE_ENA, x)
+#define DEV2G5_PCS1G_MODE_CFG_UNIDIR_MODE_ENA_GET(x)\
+       FIELD_GET(DEV2G5_PCS1G_MODE_CFG_UNIDIR_MODE_ENA, x)
+
+#define DEV2G5_PCS1G_MODE_CFG_SAVE_PREAMBLE_ENA  BIT(1)
+#define DEV2G5_PCS1G_MODE_CFG_SAVE_PREAMBLE_ENA_SET(x)\
+       FIELD_PREP(DEV2G5_PCS1G_MODE_CFG_SAVE_PREAMBLE_ENA, x)
+#define DEV2G5_PCS1G_MODE_CFG_SAVE_PREAMBLE_ENA_GET(x)\
+       FIELD_GET(DEV2G5_PCS1G_MODE_CFG_SAVE_PREAMBLE_ENA, x)
+
+#define DEV2G5_PCS1G_MODE_CFG_SGMII_MODE_ENA     BIT(0)
+#define DEV2G5_PCS1G_MODE_CFG_SGMII_MODE_ENA_SET(x)\
+       FIELD_PREP(DEV2G5_PCS1G_MODE_CFG_SGMII_MODE_ENA, x)
+#define DEV2G5_PCS1G_MODE_CFG_SGMII_MODE_ENA_GET(x)\
+       FIELD_GET(DEV2G5_PCS1G_MODE_CFG_SGMII_MODE_ENA, x)
+
+/*      DEV1G:PCS1G_CFG_STATUS:PCS1G_SD_CFG */
+#define DEV2G5_PCS1G_SD_CFG(t)    __REG(TARGET_DEV2G5, t, 65, 88, 0, 1, 68, 8, 0, 1, 4)
+
+#define DEV2G5_PCS1G_SD_CFG_SD_SEL               BIT(8)
+#define DEV2G5_PCS1G_SD_CFG_SD_SEL_SET(x)\
+       FIELD_PREP(DEV2G5_PCS1G_SD_CFG_SD_SEL, x)
+#define DEV2G5_PCS1G_SD_CFG_SD_SEL_GET(x)\
+       FIELD_GET(DEV2G5_PCS1G_SD_CFG_SD_SEL, x)
+
+#define DEV2G5_PCS1G_SD_CFG_SD_POL               BIT(4)
+#define DEV2G5_PCS1G_SD_CFG_SD_POL_SET(x)\
+       FIELD_PREP(DEV2G5_PCS1G_SD_CFG_SD_POL, x)
+#define DEV2G5_PCS1G_SD_CFG_SD_POL_GET(x)\
+       FIELD_GET(DEV2G5_PCS1G_SD_CFG_SD_POL, x)
+
+#define DEV2G5_PCS1G_SD_CFG_SD_ENA               BIT(0)
+#define DEV2G5_PCS1G_SD_CFG_SD_ENA_SET(x)\
+       FIELD_PREP(DEV2G5_PCS1G_SD_CFG_SD_ENA, x)
+#define DEV2G5_PCS1G_SD_CFG_SD_ENA_GET(x)\
+       FIELD_GET(DEV2G5_PCS1G_SD_CFG_SD_ENA, x)
+
+/*      DEV1G:PCS1G_CFG_STATUS:PCS1G_ANEG_CFG */
+#define DEV2G5_PCS1G_ANEG_CFG(t)  __REG(TARGET_DEV2G5, t, 65, 88, 0, 1, 68, 12, 0, 1, 4)
+
+#define DEV2G5_PCS1G_ANEG_CFG_ADV_ABILITY        GENMASK(31, 16)
+#define DEV2G5_PCS1G_ANEG_CFG_ADV_ABILITY_SET(x)\
+       FIELD_PREP(DEV2G5_PCS1G_ANEG_CFG_ADV_ABILITY, x)
+#define DEV2G5_PCS1G_ANEG_CFG_ADV_ABILITY_GET(x)\
+       FIELD_GET(DEV2G5_PCS1G_ANEG_CFG_ADV_ABILITY, x)
+
+#define DEV2G5_PCS1G_ANEG_CFG_SW_RESOLVE_ENA     BIT(8)
+#define DEV2G5_PCS1G_ANEG_CFG_SW_RESOLVE_ENA_SET(x)\
+       FIELD_PREP(DEV2G5_PCS1G_ANEG_CFG_SW_RESOLVE_ENA, x)
+#define DEV2G5_PCS1G_ANEG_CFG_SW_RESOLVE_ENA_GET(x)\
+       FIELD_GET(DEV2G5_PCS1G_ANEG_CFG_SW_RESOLVE_ENA, x)
+
+#define DEV2G5_PCS1G_ANEG_CFG_ANEG_RESTART_ONE_SHOT BIT(1)
+#define DEV2G5_PCS1G_ANEG_CFG_ANEG_RESTART_ONE_SHOT_SET(x)\
+       FIELD_PREP(DEV2G5_PCS1G_ANEG_CFG_ANEG_RESTART_ONE_SHOT, x)
+#define DEV2G5_PCS1G_ANEG_CFG_ANEG_RESTART_ONE_SHOT_GET(x)\
+       FIELD_GET(DEV2G5_PCS1G_ANEG_CFG_ANEG_RESTART_ONE_SHOT, x)
+
+#define DEV2G5_PCS1G_ANEG_CFG_ANEG_ENA           BIT(0)
+#define DEV2G5_PCS1G_ANEG_CFG_ANEG_ENA_SET(x)\
+       FIELD_PREP(DEV2G5_PCS1G_ANEG_CFG_ANEG_ENA, x)
+#define DEV2G5_PCS1G_ANEG_CFG_ANEG_ENA_GET(x)\
+       FIELD_GET(DEV2G5_PCS1G_ANEG_CFG_ANEG_ENA, x)
+
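+/* Editor's note, an assumption not spelled out in this hunk:
+ * ADV_ABILITY above presumably carries the 16-bit IEEE 802.3
+ * clause 37 base page to advertise, with the link partner's page
+ * read back via DEV2G5_PCS1G_ANEG_STATUS_LP_ADV_ABILITY below.
+ * A sketch of the value used to (re)start autonegotiation:
+ *
+ *   u32 val = DEV2G5_PCS1G_ANEG_CFG_ADV_ABILITY_SET(adv) |
+ *             DEV2G5_PCS1G_ANEG_CFG_ANEG_ENA_SET(1) |
+ *             DEV2G5_PCS1G_ANEG_CFG_ANEG_RESTART_ONE_SHOT_SET(1);
+ */
+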
+/*      DEV1G:PCS1G_CFG_STATUS:PCS1G_LB_CFG */
+#define DEV2G5_PCS1G_LB_CFG(t)    __REG(TARGET_DEV2G5, t, 65, 88, 0, 1, 68, 20, 0, 1, 4)
+
+#define DEV2G5_PCS1G_LB_CFG_RA_ENA               BIT(4)
+#define DEV2G5_PCS1G_LB_CFG_RA_ENA_SET(x)\
+       FIELD_PREP(DEV2G5_PCS1G_LB_CFG_RA_ENA, x)
+#define DEV2G5_PCS1G_LB_CFG_RA_ENA_GET(x)\
+       FIELD_GET(DEV2G5_PCS1G_LB_CFG_RA_ENA, x)
+
+#define DEV2G5_PCS1G_LB_CFG_GMII_PHY_LB_ENA      BIT(1)
+#define DEV2G5_PCS1G_LB_CFG_GMII_PHY_LB_ENA_SET(x)\
+       FIELD_PREP(DEV2G5_PCS1G_LB_CFG_GMII_PHY_LB_ENA, x)
+#define DEV2G5_PCS1G_LB_CFG_GMII_PHY_LB_ENA_GET(x)\
+       FIELD_GET(DEV2G5_PCS1G_LB_CFG_GMII_PHY_LB_ENA, x)
+
+#define DEV2G5_PCS1G_LB_CFG_TBI_HOST_LB_ENA      BIT(0)
+#define DEV2G5_PCS1G_LB_CFG_TBI_HOST_LB_ENA_SET(x)\
+       FIELD_PREP(DEV2G5_PCS1G_LB_CFG_TBI_HOST_LB_ENA, x)
+#define DEV2G5_PCS1G_LB_CFG_TBI_HOST_LB_ENA_GET(x)\
+       FIELD_GET(DEV2G5_PCS1G_LB_CFG_TBI_HOST_LB_ENA, x)
+
+/*      DEV1G:PCS1G_CFG_STATUS:PCS1G_ANEG_STATUS */
+#define DEV2G5_PCS1G_ANEG_STATUS(t) __REG(TARGET_DEV2G5, t, 65, 88, 0, 1, 68, 32, 0, 1, 4)
+
+#define DEV2G5_PCS1G_ANEG_STATUS_LP_ADV_ABILITY  GENMASK(31, 16)
+#define DEV2G5_PCS1G_ANEG_STATUS_LP_ADV_ABILITY_SET(x)\
+       FIELD_PREP(DEV2G5_PCS1G_ANEG_STATUS_LP_ADV_ABILITY, x)
+#define DEV2G5_PCS1G_ANEG_STATUS_LP_ADV_ABILITY_GET(x)\
+       FIELD_GET(DEV2G5_PCS1G_ANEG_STATUS_LP_ADV_ABILITY, x)
+
+#define DEV2G5_PCS1G_ANEG_STATUS_PR              BIT(4)
+#define DEV2G5_PCS1G_ANEG_STATUS_PR_SET(x)\
+       FIELD_PREP(DEV2G5_PCS1G_ANEG_STATUS_PR, x)
+#define DEV2G5_PCS1G_ANEG_STATUS_PR_GET(x)\
+       FIELD_GET(DEV2G5_PCS1G_ANEG_STATUS_PR, x)
+
+#define DEV2G5_PCS1G_ANEG_STATUS_PAGE_RX_STICKY  BIT(3)
+#define DEV2G5_PCS1G_ANEG_STATUS_PAGE_RX_STICKY_SET(x)\
+       FIELD_PREP(DEV2G5_PCS1G_ANEG_STATUS_PAGE_RX_STICKY, x)
+#define DEV2G5_PCS1G_ANEG_STATUS_PAGE_RX_STICKY_GET(x)\
+       FIELD_GET(DEV2G5_PCS1G_ANEG_STATUS_PAGE_RX_STICKY, x)
+
+#define DEV2G5_PCS1G_ANEG_STATUS_ANEG_COMPLETE   BIT(0)
+#define DEV2G5_PCS1G_ANEG_STATUS_ANEG_COMPLETE_SET(x)\
+       FIELD_PREP(DEV2G5_PCS1G_ANEG_STATUS_ANEG_COMPLETE, x)
+#define DEV2G5_PCS1G_ANEG_STATUS_ANEG_COMPLETE_GET(x)\
+       FIELD_GET(DEV2G5_PCS1G_ANEG_STATUS_ANEG_COMPLETE, x)
+
+/*      DEV1G:PCS1G_CFG_STATUS:PCS1G_LINK_STATUS */
+#define DEV2G5_PCS1G_LINK_STATUS(t) __REG(TARGET_DEV2G5, t, 65, 88, 0, 1, 68, 40, 0, 1, 4)
+
+#define DEV2G5_PCS1G_LINK_STATUS_DELAY_VAR       GENMASK(15, 12)
+#define DEV2G5_PCS1G_LINK_STATUS_DELAY_VAR_SET(x)\
+       FIELD_PREP(DEV2G5_PCS1G_LINK_STATUS_DELAY_VAR, x)
+#define DEV2G5_PCS1G_LINK_STATUS_DELAY_VAR_GET(x)\
+       FIELD_GET(DEV2G5_PCS1G_LINK_STATUS_DELAY_VAR, x)
+
+#define DEV2G5_PCS1G_LINK_STATUS_SIGNAL_DETECT   BIT(8)
+#define DEV2G5_PCS1G_LINK_STATUS_SIGNAL_DETECT_SET(x)\
+       FIELD_PREP(DEV2G5_PCS1G_LINK_STATUS_SIGNAL_DETECT, x)
+#define DEV2G5_PCS1G_LINK_STATUS_SIGNAL_DETECT_GET(x)\
+       FIELD_GET(DEV2G5_PCS1G_LINK_STATUS_SIGNAL_DETECT, x)
+
+#define DEV2G5_PCS1G_LINK_STATUS_LINK_STATUS     BIT(4)
+#define DEV2G5_PCS1G_LINK_STATUS_LINK_STATUS_SET(x)\
+       FIELD_PREP(DEV2G5_PCS1G_LINK_STATUS_LINK_STATUS, x)
+#define DEV2G5_PCS1G_LINK_STATUS_LINK_STATUS_GET(x)\
+       FIELD_GET(DEV2G5_PCS1G_LINK_STATUS_LINK_STATUS, x)
+
+#define DEV2G5_PCS1G_LINK_STATUS_SYNC_STATUS     BIT(0)
+#define DEV2G5_PCS1G_LINK_STATUS_SYNC_STATUS_SET(x)\
+       FIELD_PREP(DEV2G5_PCS1G_LINK_STATUS_SYNC_STATUS, x)
+#define DEV2G5_PCS1G_LINK_STATUS_SYNC_STATUS_GET(x)\
+       FIELD_GET(DEV2G5_PCS1G_LINK_STATUS_SYNC_STATUS, x)
+
+/*      DEV1G:PCS1G_CFG_STATUS:PCS1G_STICKY */
+#define DEV2G5_PCS1G_STICKY(t)    __REG(TARGET_DEV2G5, t, 65, 88, 0, 1, 68, 48, 0, 1, 4)
+
+#define DEV2G5_PCS1G_STICKY_LINK_DOWN_STICKY     BIT(4)
+#define DEV2G5_PCS1G_STICKY_LINK_DOWN_STICKY_SET(x)\
+       FIELD_PREP(DEV2G5_PCS1G_STICKY_LINK_DOWN_STICKY, x)
+#define DEV2G5_PCS1G_STICKY_LINK_DOWN_STICKY_GET(x)\
+       FIELD_GET(DEV2G5_PCS1G_STICKY_LINK_DOWN_STICKY, x)
+
+#define DEV2G5_PCS1G_STICKY_OUT_OF_SYNC_STICKY   BIT(0)
+#define DEV2G5_PCS1G_STICKY_OUT_OF_SYNC_STICKY_SET(x)\
+       FIELD_PREP(DEV2G5_PCS1G_STICKY_OUT_OF_SYNC_STICKY, x)
+#define DEV2G5_PCS1G_STICKY_OUT_OF_SYNC_STICKY_GET(x)\
+       FIELD_GET(DEV2G5_PCS1G_STICKY_OUT_OF_SYNC_STICKY, x)
+
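+/* Editor's note: fields named *_STICKY, here and throughout this
+ * file, latch an event until software clears them; in this hardware
+ * family that is conventionally done by writing the bit back as 1
+ * (write-one-to-clear). The clearing semantics are an assumption,
+ * as they are not spelled out in this hunk.
+ */
+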
+/*      DEV1G:PCS_FX100_CONFIGURATION:PCS_FX100_CFG */
+#define DEV2G5_PCS_FX100_CFG(t)   __REG(TARGET_DEV2G5, t, 65, 164, 0, 1, 4, 0, 0, 1, 4)
+
+#define DEV2G5_PCS_FX100_CFG_SD_SEL              BIT(26)
+#define DEV2G5_PCS_FX100_CFG_SD_SEL_SET(x)\
+       FIELD_PREP(DEV2G5_PCS_FX100_CFG_SD_SEL, x)
+#define DEV2G5_PCS_FX100_CFG_SD_SEL_GET(x)\
+       FIELD_GET(DEV2G5_PCS_FX100_CFG_SD_SEL, x)
+
+#define DEV2G5_PCS_FX100_CFG_SD_POL              BIT(25)
+#define DEV2G5_PCS_FX100_CFG_SD_POL_SET(x)\
+       FIELD_PREP(DEV2G5_PCS_FX100_CFG_SD_POL, x)
+#define DEV2G5_PCS_FX100_CFG_SD_POL_GET(x)\
+       FIELD_GET(DEV2G5_PCS_FX100_CFG_SD_POL, x)
+
+#define DEV2G5_PCS_FX100_CFG_SD_ENA              BIT(24)
+#define DEV2G5_PCS_FX100_CFG_SD_ENA_SET(x)\
+       FIELD_PREP(DEV2G5_PCS_FX100_CFG_SD_ENA, x)
+#define DEV2G5_PCS_FX100_CFG_SD_ENA_GET(x)\
+       FIELD_GET(DEV2G5_PCS_FX100_CFG_SD_ENA, x)
+
+#define DEV2G5_PCS_FX100_CFG_LOOPBACK_ENA        BIT(20)
+#define DEV2G5_PCS_FX100_CFG_LOOPBACK_ENA_SET(x)\
+       FIELD_PREP(DEV2G5_PCS_FX100_CFG_LOOPBACK_ENA, x)
+#define DEV2G5_PCS_FX100_CFG_LOOPBACK_ENA_GET(x)\
+       FIELD_GET(DEV2G5_PCS_FX100_CFG_LOOPBACK_ENA, x)
+
+#define DEV2G5_PCS_FX100_CFG_SWAP_MII_ENA        BIT(16)
+#define DEV2G5_PCS_FX100_CFG_SWAP_MII_ENA_SET(x)\
+       FIELD_PREP(DEV2G5_PCS_FX100_CFG_SWAP_MII_ENA, x)
+#define DEV2G5_PCS_FX100_CFG_SWAP_MII_ENA_GET(x)\
+       FIELD_GET(DEV2G5_PCS_FX100_CFG_SWAP_MII_ENA, x)
+
+#define DEV2G5_PCS_FX100_CFG_RXBITSEL            GENMASK(15, 12)
+#define DEV2G5_PCS_FX100_CFG_RXBITSEL_SET(x)\
+       FIELD_PREP(DEV2G5_PCS_FX100_CFG_RXBITSEL, x)
+#define DEV2G5_PCS_FX100_CFG_RXBITSEL_GET(x)\
+       FIELD_GET(DEV2G5_PCS_FX100_CFG_RXBITSEL, x)
+
+#define DEV2G5_PCS_FX100_CFG_SIGDET_CFG          GENMASK(10, 9)
+#define DEV2G5_PCS_FX100_CFG_SIGDET_CFG_SET(x)\
+       FIELD_PREP(DEV2G5_PCS_FX100_CFG_SIGDET_CFG, x)
+#define DEV2G5_PCS_FX100_CFG_SIGDET_CFG_GET(x)\
+       FIELD_GET(DEV2G5_PCS_FX100_CFG_SIGDET_CFG, x)
+
+#define DEV2G5_PCS_FX100_CFG_LINKHYST_TM_ENA     BIT(8)
+#define DEV2G5_PCS_FX100_CFG_LINKHYST_TM_ENA_SET(x)\
+       FIELD_PREP(DEV2G5_PCS_FX100_CFG_LINKHYST_TM_ENA, x)
+#define DEV2G5_PCS_FX100_CFG_LINKHYST_TM_ENA_GET(x)\
+       FIELD_GET(DEV2G5_PCS_FX100_CFG_LINKHYST_TM_ENA, x)
+
+#define DEV2G5_PCS_FX100_CFG_LINKHYSTTIMER       GENMASK(7, 4)
+#define DEV2G5_PCS_FX100_CFG_LINKHYSTTIMER_SET(x)\
+       FIELD_PREP(DEV2G5_PCS_FX100_CFG_LINKHYSTTIMER, x)
+#define DEV2G5_PCS_FX100_CFG_LINKHYSTTIMER_GET(x)\
+       FIELD_GET(DEV2G5_PCS_FX100_CFG_LINKHYSTTIMER, x)
+
+#define DEV2G5_PCS_FX100_CFG_UNIDIR_MODE_ENA     BIT(3)
+#define DEV2G5_PCS_FX100_CFG_UNIDIR_MODE_ENA_SET(x)\
+       FIELD_PREP(DEV2G5_PCS_FX100_CFG_UNIDIR_MODE_ENA, x)
+#define DEV2G5_PCS_FX100_CFG_UNIDIR_MODE_ENA_GET(x)\
+       FIELD_GET(DEV2G5_PCS_FX100_CFG_UNIDIR_MODE_ENA, x)
+
+#define DEV2G5_PCS_FX100_CFG_FEFCHK_ENA          BIT(2)
+#define DEV2G5_PCS_FX100_CFG_FEFCHK_ENA_SET(x)\
+       FIELD_PREP(DEV2G5_PCS_FX100_CFG_FEFCHK_ENA, x)
+#define DEV2G5_PCS_FX100_CFG_FEFCHK_ENA_GET(x)\
+       FIELD_GET(DEV2G5_PCS_FX100_CFG_FEFCHK_ENA, x)
+
+#define DEV2G5_PCS_FX100_CFG_FEFGEN_ENA          BIT(1)
+#define DEV2G5_PCS_FX100_CFG_FEFGEN_ENA_SET(x)\
+       FIELD_PREP(DEV2G5_PCS_FX100_CFG_FEFGEN_ENA, x)
+#define DEV2G5_PCS_FX100_CFG_FEFGEN_ENA_GET(x)\
+       FIELD_GET(DEV2G5_PCS_FX100_CFG_FEFGEN_ENA, x)
+
+#define DEV2G5_PCS_FX100_CFG_PCS_ENA             BIT(0)
+#define DEV2G5_PCS_FX100_CFG_PCS_ENA_SET(x)\
+       FIELD_PREP(DEV2G5_PCS_FX100_CFG_PCS_ENA, x)
+#define DEV2G5_PCS_FX100_CFG_PCS_ENA_GET(x)\
+       FIELD_GET(DEV2G5_PCS_FX100_CFG_PCS_ENA, x)
+
+/*      DEV1G:PCS_FX100_STATUS:PCS_FX100_STATUS */
+#define DEV2G5_PCS_FX100_STATUS(t) __REG(TARGET_DEV2G5, t, 65, 168, 0, 1, 4, 0, 0, 1, 4)
+
+#define DEV2G5_PCS_FX100_STATUS_EDGE_POS_PTP     GENMASK(11, 8)
+#define DEV2G5_PCS_FX100_STATUS_EDGE_POS_PTP_SET(x)\
+       FIELD_PREP(DEV2G5_PCS_FX100_STATUS_EDGE_POS_PTP, x)
+#define DEV2G5_PCS_FX100_STATUS_EDGE_POS_PTP_GET(x)\
+       FIELD_GET(DEV2G5_PCS_FX100_STATUS_EDGE_POS_PTP, x)
+
+#define DEV2G5_PCS_FX100_STATUS_PCS_ERROR_STICKY BIT(7)
+#define DEV2G5_PCS_FX100_STATUS_PCS_ERROR_STICKY_SET(x)\
+       FIELD_PREP(DEV2G5_PCS_FX100_STATUS_PCS_ERROR_STICKY, x)
+#define DEV2G5_PCS_FX100_STATUS_PCS_ERROR_STICKY_GET(x)\
+       FIELD_GET(DEV2G5_PCS_FX100_STATUS_PCS_ERROR_STICKY, x)
+
+#define DEV2G5_PCS_FX100_STATUS_FEF_FOUND_STICKY BIT(6)
+#define DEV2G5_PCS_FX100_STATUS_FEF_FOUND_STICKY_SET(x)\
+       FIELD_PREP(DEV2G5_PCS_FX100_STATUS_FEF_FOUND_STICKY, x)
+#define DEV2G5_PCS_FX100_STATUS_FEF_FOUND_STICKY_GET(x)\
+       FIELD_GET(DEV2G5_PCS_FX100_STATUS_FEF_FOUND_STICKY, x)
+
+#define DEV2G5_PCS_FX100_STATUS_SSD_ERROR_STICKY BIT(5)
+#define DEV2G5_PCS_FX100_STATUS_SSD_ERROR_STICKY_SET(x)\
+       FIELD_PREP(DEV2G5_PCS_FX100_STATUS_SSD_ERROR_STICKY, x)
+#define DEV2G5_PCS_FX100_STATUS_SSD_ERROR_STICKY_GET(x)\
+       FIELD_GET(DEV2G5_PCS_FX100_STATUS_SSD_ERROR_STICKY, x)
+
+#define DEV2G5_PCS_FX100_STATUS_SYNC_LOST_STICKY BIT(4)
+#define DEV2G5_PCS_FX100_STATUS_SYNC_LOST_STICKY_SET(x)\
+       FIELD_PREP(DEV2G5_PCS_FX100_STATUS_SYNC_LOST_STICKY, x)
+#define DEV2G5_PCS_FX100_STATUS_SYNC_LOST_STICKY_GET(x)\
+       FIELD_GET(DEV2G5_PCS_FX100_STATUS_SYNC_LOST_STICKY, x)
+
+#define DEV2G5_PCS_FX100_STATUS_FEF_STATUS       BIT(2)
+#define DEV2G5_PCS_FX100_STATUS_FEF_STATUS_SET(x)\
+       FIELD_PREP(DEV2G5_PCS_FX100_STATUS_FEF_STATUS, x)
+#define DEV2G5_PCS_FX100_STATUS_FEF_STATUS_GET(x)\
+       FIELD_GET(DEV2G5_PCS_FX100_STATUS_FEF_STATUS, x)
+
+#define DEV2G5_PCS_FX100_STATUS_SIGNAL_DETECT    BIT(1)
+#define DEV2G5_PCS_FX100_STATUS_SIGNAL_DETECT_SET(x)\
+       FIELD_PREP(DEV2G5_PCS_FX100_STATUS_SIGNAL_DETECT, x)
+#define DEV2G5_PCS_FX100_STATUS_SIGNAL_DETECT_GET(x)\
+       FIELD_GET(DEV2G5_PCS_FX100_STATUS_SIGNAL_DETECT, x)
+
+#define DEV2G5_PCS_FX100_STATUS_SYNC_STATUS      BIT(0)
+#define DEV2G5_PCS_FX100_STATUS_SYNC_STATUS_SET(x)\
+       FIELD_PREP(DEV2G5_PCS_FX100_STATUS_SYNC_STATUS, x)
+#define DEV2G5_PCS_FX100_STATUS_SYNC_STATUS_GET(x)\
+       FIELD_GET(DEV2G5_PCS_FX100_STATUS_SYNC_STATUS, x)
+
+/*      DEV10G:MAC_CFG_STATUS:MAC_ENA_CFG */
+#define DEV5G_MAC_ENA_CFG(t)      __REG(TARGET_DEV5G, t, 13, 0, 0, 1, 60, 0, 0, 1, 4)
+
+#define DEV5G_MAC_ENA_CFG_RX_ENA                 BIT(4)
+#define DEV5G_MAC_ENA_CFG_RX_ENA_SET(x)\
+       FIELD_PREP(DEV5G_MAC_ENA_CFG_RX_ENA, x)
+#define DEV5G_MAC_ENA_CFG_RX_ENA_GET(x)\
+       FIELD_GET(DEV5G_MAC_ENA_CFG_RX_ENA, x)
+
+#define DEV5G_MAC_ENA_CFG_TX_ENA                 BIT(0)
+#define DEV5G_MAC_ENA_CFG_TX_ENA_SET(x)\
+       FIELD_PREP(DEV5G_MAC_ENA_CFG_TX_ENA, x)
+#define DEV5G_MAC_ENA_CFG_TX_ENA_GET(x)\
+       FIELD_GET(DEV5G_MAC_ENA_CFG_TX_ENA, x)
+
+/*      DEV10G:MAC_CFG_STATUS:MAC_MAXLEN_CFG */
+#define DEV5G_MAC_MAXLEN_CFG(t)   __REG(TARGET_DEV5G, t, 13, 0, 0, 1, 60, 8, 0, 1, 4)
+
+#define DEV5G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK     BIT(16)
+#define DEV5G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK_SET(x)\
+       FIELD_PREP(DEV5G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK, x)
+#define DEV5G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK_GET(x)\
+       FIELD_GET(DEV5G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK, x)
+
+#define DEV5G_MAC_MAXLEN_CFG_MAX_LEN             GENMASK(15, 0)
+#define DEV5G_MAC_MAXLEN_CFG_MAX_LEN_SET(x)\
+       FIELD_PREP(DEV5G_MAC_MAXLEN_CFG_MAX_LEN, x)
+#define DEV5G_MAC_MAXLEN_CFG_MAX_LEN_GET(x)\
+       FIELD_GET(DEV5G_MAC_MAXLEN_CFG_MAX_LEN, x)
+
+/*      DEV10G:MAC_CFG_STATUS:MAC_ADV_CHK_CFG */
+#define DEV5G_MAC_ADV_CHK_CFG(t)  __REG(TARGET_DEV5G, t, 13, 0, 0, 1, 60, 28, 0, 1, 4)
+
+#define DEV5G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA    BIT(24)
+#define DEV5G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA_SET(x)\
+       FIELD_PREP(DEV5G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA, x)
+#define DEV5G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA_GET(x)\
+       FIELD_GET(DEV5G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA, x)
+
+#define DEV5G_MAC_ADV_CHK_CFG_EXT_SOP_CHK_ENA    BIT(20)
+#define DEV5G_MAC_ADV_CHK_CFG_EXT_SOP_CHK_ENA_SET(x)\
+       FIELD_PREP(DEV5G_MAC_ADV_CHK_CFG_EXT_SOP_CHK_ENA, x)
+#define DEV5G_MAC_ADV_CHK_CFG_EXT_SOP_CHK_ENA_GET(x)\
+       FIELD_GET(DEV5G_MAC_ADV_CHK_CFG_EXT_SOP_CHK_ENA, x)
+
+#define DEV5G_MAC_ADV_CHK_CFG_SFD_CHK_ENA        BIT(16)
+#define DEV5G_MAC_ADV_CHK_CFG_SFD_CHK_ENA_SET(x)\
+       FIELD_PREP(DEV5G_MAC_ADV_CHK_CFG_SFD_CHK_ENA, x)
+#define DEV5G_MAC_ADV_CHK_CFG_SFD_CHK_ENA_GET(x)\
+       FIELD_GET(DEV5G_MAC_ADV_CHK_CFG_SFD_CHK_ENA, x)
+
+#define DEV5G_MAC_ADV_CHK_CFG_PRM_SHK_CHK_DIS    BIT(12)
+#define DEV5G_MAC_ADV_CHK_CFG_PRM_SHK_CHK_DIS_SET(x)\
+       FIELD_PREP(DEV5G_MAC_ADV_CHK_CFG_PRM_SHK_CHK_DIS, x)
+#define DEV5G_MAC_ADV_CHK_CFG_PRM_SHK_CHK_DIS_GET(x)\
+       FIELD_GET(DEV5G_MAC_ADV_CHK_CFG_PRM_SHK_CHK_DIS, x)
+
+#define DEV5G_MAC_ADV_CHK_CFG_PRM_CHK_ENA        BIT(8)
+#define DEV5G_MAC_ADV_CHK_CFG_PRM_CHK_ENA_SET(x)\
+       FIELD_PREP(DEV5G_MAC_ADV_CHK_CFG_PRM_CHK_ENA, x)
+#define DEV5G_MAC_ADV_CHK_CFG_PRM_CHK_ENA_GET(x)\
+       FIELD_GET(DEV5G_MAC_ADV_CHK_CFG_PRM_CHK_ENA, x)
+
+#define DEV5G_MAC_ADV_CHK_CFG_OOR_ERR_ENA        BIT(4)
+#define DEV5G_MAC_ADV_CHK_CFG_OOR_ERR_ENA_SET(x)\
+       FIELD_PREP(DEV5G_MAC_ADV_CHK_CFG_OOR_ERR_ENA, x)
+#define DEV5G_MAC_ADV_CHK_CFG_OOR_ERR_ENA_GET(x)\
+       FIELD_GET(DEV5G_MAC_ADV_CHK_CFG_OOR_ERR_ENA, x)
+
+#define DEV5G_MAC_ADV_CHK_CFG_INR_ERR_ENA        BIT(0)
+#define DEV5G_MAC_ADV_CHK_CFG_INR_ERR_ENA_SET(x)\
+       FIELD_PREP(DEV5G_MAC_ADV_CHK_CFG_INR_ERR_ENA, x)
+#define DEV5G_MAC_ADV_CHK_CFG_INR_ERR_ENA_GET(x)\
+       FIELD_GET(DEV5G_MAC_ADV_CHK_CFG_INR_ERR_ENA, x)
+
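+/* Editor's note: the DEV_STATISTICS_32BIT registers that follow are
+ * plain 32-bit counters occupying the full word, which is why no
+ * field masks or _SET()/_GET() helpers are generated for them. They
+ * would simply be read whole, e.g. (the spx5_rr() read helper is an
+ * assumption, not shown in this hunk):
+ *
+ *   stats[idx] = spx5_rr(sparx5, DEV5G_RX_PAUSE_CNT(tinst));
+ */
+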
+/*      DEV10G:DEV_STATISTICS_32BIT:RX_SYMBOL_ERR_CNT */
+#define DEV5G_RX_SYMBOL_ERR_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 0, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:RX_PAUSE_CNT */
+#define DEV5G_RX_PAUSE_CNT(t)     __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 4, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:RX_UNSUP_OPCODE_CNT */
+#define DEV5G_RX_UNSUP_OPCODE_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 8, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:RX_UC_CNT */
+#define DEV5G_RX_UC_CNT(t)        __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 12, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:RX_MC_CNT */
+#define DEV5G_RX_MC_CNT(t)        __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 16, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:RX_BC_CNT */
+#define DEV5G_RX_BC_CNT(t)        __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 20, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:RX_CRC_ERR_CNT */
+#define DEV5G_RX_CRC_ERR_CNT(t)   __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 24, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:RX_UNDERSIZE_CNT */
+#define DEV5G_RX_UNDERSIZE_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 28, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:RX_FRAGMENTS_CNT */
+#define DEV5G_RX_FRAGMENTS_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 32, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:RX_IN_RANGE_LEN_ERR_CNT */
+#define DEV5G_RX_IN_RANGE_LEN_ERR_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 36, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:RX_OUT_OF_RANGE_LEN_ERR_CNT */
+#define DEV5G_RX_OUT_OF_RANGE_LEN_ERR_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 40, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:RX_OVERSIZE_CNT */
+#define DEV5G_RX_OVERSIZE_CNT(t)  __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 44, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:RX_JABBERS_CNT */
+#define DEV5G_RX_JABBERS_CNT(t)   __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 48, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:RX_SIZE64_CNT */
+#define DEV5G_RX_SIZE64_CNT(t)    __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 52, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:RX_SIZE65TO127_CNT */
+#define DEV5G_RX_SIZE65TO127_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 56, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:RX_SIZE128TO255_CNT */
+#define DEV5G_RX_SIZE128TO255_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 60, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:RX_SIZE256TO511_CNT */
+#define DEV5G_RX_SIZE256TO511_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 64, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:RX_SIZE512TO1023_CNT */
+#define DEV5G_RX_SIZE512TO1023_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 68, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:RX_SIZE1024TO1518_CNT */
+#define DEV5G_RX_SIZE1024TO1518_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 72, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:RX_SIZE1519TOMAX_CNT */
+#define DEV5G_RX_SIZE1519TOMAX_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 76, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:RX_IPG_SHRINK_CNT */
+#define DEV5G_RX_IPG_SHRINK_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 80, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:TX_PAUSE_CNT */
+#define DEV5G_TX_PAUSE_CNT(t)     __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 84, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:TX_UC_CNT */
+#define DEV5G_TX_UC_CNT(t)        __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 88, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:TX_MC_CNT */
+#define DEV5G_TX_MC_CNT(t)        __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 92, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:TX_BC_CNT */
+#define DEV5G_TX_BC_CNT(t)        __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 96, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:TX_SIZE64_CNT */
+#define DEV5G_TX_SIZE64_CNT(t)    __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 100, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:TX_SIZE65TO127_CNT */
+#define DEV5G_TX_SIZE65TO127_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 104, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:TX_SIZE128TO255_CNT */
+#define DEV5G_TX_SIZE128TO255_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 108, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:TX_SIZE256TO511_CNT */
+#define DEV5G_TX_SIZE256TO511_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 112, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:TX_SIZE512TO1023_CNT */
+#define DEV5G_TX_SIZE512TO1023_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 116, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:TX_SIZE1024TO1518_CNT */
+#define DEV5G_TX_SIZE1024TO1518_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 120, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:TX_SIZE1519TOMAX_CNT */
+#define DEV5G_TX_SIZE1519TOMAX_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 124, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:RX_ALIGNMENT_LOST_CNT */
+#define DEV5G_RX_ALIGNMENT_LOST_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 128, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:RX_TAGGED_FRMS_CNT */
+#define DEV5G_RX_TAGGED_FRMS_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 132, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:RX_UNTAGGED_FRMS_CNT */
+#define DEV5G_RX_UNTAGGED_FRMS_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 136, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:TX_TAGGED_FRMS_CNT */
+#define DEV5G_TX_TAGGED_FRMS_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 140, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:TX_UNTAGGED_FRMS_CNT */
+#define DEV5G_TX_UNTAGGED_FRMS_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 144, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SYMBOL_ERR_CNT */
+#define DEV5G_PMAC_RX_SYMBOL_ERR_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 148, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_PAUSE_CNT */
+#define DEV5G_PMAC_RX_PAUSE_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 152, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_UNSUP_OPCODE_CNT */
+#define DEV5G_PMAC_RX_UNSUP_OPCODE_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 156, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_UC_CNT */
+#define DEV5G_PMAC_RX_UC_CNT(t)   __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 160, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_MC_CNT */
+#define DEV5G_PMAC_RX_MC_CNT(t)   __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 164, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_BC_CNT */
+#define DEV5G_PMAC_RX_BC_CNT(t)   __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 168, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_CRC_ERR_CNT */
+#define DEV5G_PMAC_RX_CRC_ERR_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 172, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_UNDERSIZE_CNT */
+#define DEV5G_PMAC_RX_UNDERSIZE_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 176, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_FRAGMENTS_CNT */
+#define DEV5G_PMAC_RX_FRAGMENTS_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 180, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_IN_RANGE_LEN_ERR_CNT */
+#define DEV5G_PMAC_RX_IN_RANGE_LEN_ERR_CNT(t) __REG(TARGET_DEV5G,\
+                                       t, 13, 60, 0, 1, 312, 184, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_OUT_OF_RANGE_LEN_ERR_CNT */
+#define DEV5G_PMAC_RX_OUT_OF_RANGE_LEN_ERR_CNT(t) __REG(TARGET_DEV5G,\
+                                       t, 13, 60, 0, 1, 312, 188, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_OVERSIZE_CNT */
+#define DEV5G_PMAC_RX_OVERSIZE_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 192, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_JABBERS_CNT */
+#define DEV5G_PMAC_RX_JABBERS_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 196, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SIZE64_CNT */
+#define DEV5G_PMAC_RX_SIZE64_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 200, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SIZE65TO127_CNT */
+#define DEV5G_PMAC_RX_SIZE65TO127_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 204, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SIZE128TO255_CNT */
+#define DEV5G_PMAC_RX_SIZE128TO255_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 208, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SIZE256TO511_CNT */
+#define DEV5G_PMAC_RX_SIZE256TO511_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 212, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SIZE512TO1023_CNT */
+#define DEV5G_PMAC_RX_SIZE512TO1023_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 216, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SIZE1024TO1518_CNT */
+#define DEV5G_PMAC_RX_SIZE1024TO1518_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 220, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SIZE1519TOMAX_CNT */
+#define DEV5G_PMAC_RX_SIZE1519TOMAX_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 224, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_PAUSE_CNT */
+#define DEV5G_PMAC_TX_PAUSE_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 228, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_UC_CNT */
+#define DEV5G_PMAC_TX_UC_CNT(t)   __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 232, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_MC_CNT */
+#define DEV5G_PMAC_TX_MC_CNT(t)   __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 236, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_BC_CNT */
+#define DEV5G_PMAC_TX_BC_CNT(t)   __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 240, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_SIZE64_CNT */
+#define DEV5G_PMAC_TX_SIZE64_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 244, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_SIZE65TO127_CNT */
+#define DEV5G_PMAC_TX_SIZE65TO127_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 248, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_SIZE128TO255_CNT */
+#define DEV5G_PMAC_TX_SIZE128TO255_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 252, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_SIZE256TO511_CNT */
+#define DEV5G_PMAC_TX_SIZE256TO511_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 256, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_SIZE512TO1023_CNT */
+#define DEV5G_PMAC_TX_SIZE512TO1023_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 260, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_SIZE1024TO1518_CNT */
+#define DEV5G_PMAC_TX_SIZE1024TO1518_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 264, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_SIZE1519TOMAX_CNT */
+#define DEV5G_PMAC_TX_SIZE1519TOMAX_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 268, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_ALIGNMENT_LOST_CNT */
+#define DEV5G_PMAC_RX_ALIGNMENT_LOST_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 272, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:MM_RX_ASSEMBLY_ERR_CNT */
+#define DEV5G_MM_RX_ASSEMBLY_ERR_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 276, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:MM_RX_SMD_ERR_CNT */
+#define DEV5G_MM_RX_SMD_ERR_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 280, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:MM_RX_ASSEMBLY_OK_CNT */
+#define DEV5G_MM_RX_ASSEMBLY_OK_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 284, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:MM_RX_MERGE_FRAG_CNT */
+#define DEV5G_MM_RX_MERGE_FRAG_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 288, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:MM_TX_PFRAGMENT_CNT */
+#define DEV5G_MM_TX_PFRAGMENT_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 292, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:RX_HIH_CKSM_ERR_CNT */
+#define DEV5G_RX_HIH_CKSM_ERR_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 296, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:RX_XGMII_PROT_ERR_CNT */
+#define DEV5G_RX_XGMII_PROT_ERR_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 300, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_HIH_CKSM_ERR_CNT */
+#define DEV5G_PMAC_RX_HIH_CKSM_ERR_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 304, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_XGMII_PROT_ERR_CNT */
+#define DEV5G_PMAC_RX_XGMII_PROT_ERR_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 308, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_40BIT:RX_IN_BYTES_CNT */
+#define DEV5G_RX_IN_BYTES_CNT(t)  __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 0, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_40BIT:RX_IN_BYTES_MSB_CNT */
+#define DEV5G_RX_IN_BYTES_MSB_CNT(t) __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 4, 0, 1, 4)
+
+#define DEV5G_RX_IN_BYTES_MSB_CNT_RX_IN_BYTES_MSB_CNT GENMASK(7, 0)
+#define DEV5G_RX_IN_BYTES_MSB_CNT_RX_IN_BYTES_MSB_CNT_SET(x)\
+       FIELD_PREP(DEV5G_RX_IN_BYTES_MSB_CNT_RX_IN_BYTES_MSB_CNT, x)
+#define DEV5G_RX_IN_BYTES_MSB_CNT_RX_IN_BYTES_MSB_CNT_GET(x)\
+       FIELD_GET(DEV5G_RX_IN_BYTES_MSB_CNT_RX_IN_BYTES_MSB_CNT, x)
+
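+/* Usage sketch: the 40-bit counters in this group are split into a
+ * 32-bit LSB register and an 8-bit MSB register.  Assuming the driver's
+ * spx5_rd() register read helper and its struct sparx5 private data
+ * (hypothetical names here), the two halves recombine into a single
+ * 64-bit count.
+ */
+static inline u64 spx5_dev5g_rx_in_bytes(struct sparx5 *sparx5, int t)
+{
+       u64 msb = DEV5G_RX_IN_BYTES_MSB_CNT_RX_IN_BYTES_MSB_CNT_GET
+               (spx5_rd(sparx5, DEV5G_RX_IN_BYTES_MSB_CNT(t)));
+
+       /* MSB holds bits 39:32; the LSB register wraps at 32 bits */
+       return (msb << 32) | spx5_rd(sparx5, DEV5G_RX_IN_BYTES_CNT(t));
+}
+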
+/*      DEV10G:DEV_STATISTICS_40BIT:RX_OK_BYTES_CNT */
+#define DEV5G_RX_OK_BYTES_CNT(t)  __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 8, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_40BIT:RX_OK_BYTES_MSB_CNT */
+#define DEV5G_RX_OK_BYTES_MSB_CNT(t) __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 12, 0, 1, 4)
+
+#define DEV5G_RX_OK_BYTES_MSB_CNT_RX_OK_BYTES_MSB_CNT GENMASK(7, 0)
+#define DEV5G_RX_OK_BYTES_MSB_CNT_RX_OK_BYTES_MSB_CNT_SET(x)\
+       FIELD_PREP(DEV5G_RX_OK_BYTES_MSB_CNT_RX_OK_BYTES_MSB_CNT, x)
+#define DEV5G_RX_OK_BYTES_MSB_CNT_RX_OK_BYTES_MSB_CNT_GET(x)\
+       FIELD_GET(DEV5G_RX_OK_BYTES_MSB_CNT_RX_OK_BYTES_MSB_CNT, x)
+
+/*      DEV10G:DEV_STATISTICS_40BIT:RX_BAD_BYTES_CNT */
+#define DEV5G_RX_BAD_BYTES_CNT(t) __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 16, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_40BIT:RX_BAD_BYTES_MSB_CNT */
+#define DEV5G_RX_BAD_BYTES_MSB_CNT(t) __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 20, 0, 1, 4)
+
+#define DEV5G_RX_BAD_BYTES_MSB_CNT_RX_BAD_BYTES_MSB_CNT GENMASK(7, 0)
+#define DEV5G_RX_BAD_BYTES_MSB_CNT_RX_BAD_BYTES_MSB_CNT_SET(x)\
+       FIELD_PREP(DEV5G_RX_BAD_BYTES_MSB_CNT_RX_BAD_BYTES_MSB_CNT, x)
+#define DEV5G_RX_BAD_BYTES_MSB_CNT_RX_BAD_BYTES_MSB_CNT_GET(x)\
+       FIELD_GET(DEV5G_RX_BAD_BYTES_MSB_CNT_RX_BAD_BYTES_MSB_CNT, x)
+
+/*      DEV10G:DEV_STATISTICS_40BIT:TX_OUT_BYTES_CNT */
+#define DEV5G_TX_OUT_BYTES_CNT(t) __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 24, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_40BIT:TX_OUT_BYTES_MSB_CNT */
+#define DEV5G_TX_OUT_BYTES_MSB_CNT(t) __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 28, 0, 1, 4)
+
+#define DEV5G_TX_OUT_BYTES_MSB_CNT_TX_OUT_BYTES_MSB_CNT GENMASK(7, 0)
+#define DEV5G_TX_OUT_BYTES_MSB_CNT_TX_OUT_BYTES_MSB_CNT_SET(x)\
+       FIELD_PREP(DEV5G_TX_OUT_BYTES_MSB_CNT_TX_OUT_BYTES_MSB_CNT, x)
+#define DEV5G_TX_OUT_BYTES_MSB_CNT_TX_OUT_BYTES_MSB_CNT_GET(x)\
+       FIELD_GET(DEV5G_TX_OUT_BYTES_MSB_CNT_TX_OUT_BYTES_MSB_CNT, x)
+
+/*      DEV10G:DEV_STATISTICS_40BIT:TX_OK_BYTES_CNT */
+#define DEV5G_TX_OK_BYTES_CNT(t)  __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 32, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_40BIT:TX_OK_BYTES_MSB_CNT */
+#define DEV5G_TX_OK_BYTES_MSB_CNT(t) __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 36, 0, 1, 4)
+
+#define DEV5G_TX_OK_BYTES_MSB_CNT_TX_OK_BYTES_MSB_CNT GENMASK(7, 0)
+#define DEV5G_TX_OK_BYTES_MSB_CNT_TX_OK_BYTES_MSB_CNT_SET(x)\
+       FIELD_PREP(DEV5G_TX_OK_BYTES_MSB_CNT_TX_OK_BYTES_MSB_CNT, x)
+#define DEV5G_TX_OK_BYTES_MSB_CNT_TX_OK_BYTES_MSB_CNT_GET(x)\
+       FIELD_GET(DEV5G_TX_OK_BYTES_MSB_CNT_TX_OK_BYTES_MSB_CNT, x)
+
+/*      DEV10G:DEV_STATISTICS_40BIT:PMAC_RX_OK_BYTES_CNT */
+#define DEV5G_PMAC_RX_OK_BYTES_CNT(t) __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 40, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_40BIT:PMAC_RX_OK_BYTES_MSB_CNT */
+#define DEV5G_PMAC_RX_OK_BYTES_MSB_CNT(t) __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 44, 0, 1, 4)
+
+#define DEV5G_PMAC_RX_OK_BYTES_MSB_CNT_PMAC_RX_OK_BYTES_MSB_CNT GENMASK(7, 0)
+#define DEV5G_PMAC_RX_OK_BYTES_MSB_CNT_PMAC_RX_OK_BYTES_MSB_CNT_SET(x)\
+       FIELD_PREP(DEV5G_PMAC_RX_OK_BYTES_MSB_CNT_PMAC_RX_OK_BYTES_MSB_CNT, x)
+#define DEV5G_PMAC_RX_OK_BYTES_MSB_CNT_PMAC_RX_OK_BYTES_MSB_CNT_GET(x)\
+       FIELD_GET(DEV5G_PMAC_RX_OK_BYTES_MSB_CNT_PMAC_RX_OK_BYTES_MSB_CNT, x)
+
+/*      DEV10G:DEV_STATISTICS_40BIT:PMAC_RX_BAD_BYTES_CNT */
+#define DEV5G_PMAC_RX_BAD_BYTES_CNT(t) __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 48, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_40BIT:PMAC_RX_BAD_BYTES_MSB_CNT */
+#define DEV5G_PMAC_RX_BAD_BYTES_MSB_CNT(t) __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 52, 0, 1, 4)
+
+#define DEV5G_PMAC_RX_BAD_BYTES_MSB_CNT_PMAC_RX_BAD_BYTES_MSB_CNT GENMASK(7, 0)
+#define DEV5G_PMAC_RX_BAD_BYTES_MSB_CNT_PMAC_RX_BAD_BYTES_MSB_CNT_SET(x)\
+       FIELD_PREP(DEV5G_PMAC_RX_BAD_BYTES_MSB_CNT_PMAC_RX_BAD_BYTES_MSB_CNT, x)
+#define DEV5G_PMAC_RX_BAD_BYTES_MSB_CNT_PMAC_RX_BAD_BYTES_MSB_CNT_GET(x)\
+       FIELD_GET(DEV5G_PMAC_RX_BAD_BYTES_MSB_CNT_PMAC_RX_BAD_BYTES_MSB_CNT, x)
+
+/*      DEV10G:DEV_STATISTICS_40BIT:PMAC_TX_OK_BYTES_CNT */
+#define DEV5G_PMAC_TX_OK_BYTES_CNT(t) __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 56, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_40BIT:PMAC_TX_OK_BYTES_MSB_CNT */
+#define DEV5G_PMAC_TX_OK_BYTES_MSB_CNT(t) __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 60, 0, 1, 4)
+
+#define DEV5G_PMAC_TX_OK_BYTES_MSB_CNT_PMAC_TX_OK_BYTES_MSB_CNT GENMASK(7, 0)
+#define DEV5G_PMAC_TX_OK_BYTES_MSB_CNT_PMAC_TX_OK_BYTES_MSB_CNT_SET(x)\
+       FIELD_PREP(DEV5G_PMAC_TX_OK_BYTES_MSB_CNT_PMAC_TX_OK_BYTES_MSB_CNT, x)
+#define DEV5G_PMAC_TX_OK_BYTES_MSB_CNT_PMAC_TX_OK_BYTES_MSB_CNT_GET(x)\
+       FIELD_GET(DEV5G_PMAC_TX_OK_BYTES_MSB_CNT_PMAC_TX_OK_BYTES_MSB_CNT, x)
+
+/*      DEV10G:DEV_CFG_STATUS:DEV_RST_CTRL */
+#define DEV5G_DEV_RST_CTRL(t)     __REG(TARGET_DEV5G, t, 13, 436, 0, 1, 52, 0, 0, 1, 4)
+
+#define DEV5G_DEV_RST_CTRL_PARDET_MODE_ENA       BIT(28)
+#define DEV5G_DEV_RST_CTRL_PARDET_MODE_ENA_SET(x)\
+       FIELD_PREP(DEV5G_DEV_RST_CTRL_PARDET_MODE_ENA, x)
+#define DEV5G_DEV_RST_CTRL_PARDET_MODE_ENA_GET(x)\
+       FIELD_GET(DEV5G_DEV_RST_CTRL_PARDET_MODE_ENA, x)
+
+#define DEV5G_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS BIT(27)
+#define DEV5G_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS_SET(x)\
+       FIELD_PREP(DEV5G_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS, x)
+#define DEV5G_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS_GET(x)\
+       FIELD_GET(DEV5G_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS, x)
+
+#define DEV5G_DEV_RST_CTRL_MUXED_USXGMII_NETWORK_PORTS GENMASK(26, 25)
+#define DEV5G_DEV_RST_CTRL_MUXED_USXGMII_NETWORK_PORTS_SET(x)\
+       FIELD_PREP(DEV5G_DEV_RST_CTRL_MUXED_USXGMII_NETWORK_PORTS, x)
+#define DEV5G_DEV_RST_CTRL_MUXED_USXGMII_NETWORK_PORTS_GET(x)\
+       FIELD_GET(DEV5G_DEV_RST_CTRL_MUXED_USXGMII_NETWORK_PORTS, x)
+
+#define DEV5G_DEV_RST_CTRL_SERDES_SPEED_SEL      GENMASK(24, 23)
+#define DEV5G_DEV_RST_CTRL_SERDES_SPEED_SEL_SET(x)\
+       FIELD_PREP(DEV5G_DEV_RST_CTRL_SERDES_SPEED_SEL, x)
+#define DEV5G_DEV_RST_CTRL_SERDES_SPEED_SEL_GET(x)\
+       FIELD_GET(DEV5G_DEV_RST_CTRL_SERDES_SPEED_SEL, x)
+
+#define DEV5G_DEV_RST_CTRL_SPEED_SEL             GENMASK(22, 20)
+#define DEV5G_DEV_RST_CTRL_SPEED_SEL_SET(x)\
+       FIELD_PREP(DEV5G_DEV_RST_CTRL_SPEED_SEL, x)
+#define DEV5G_DEV_RST_CTRL_SPEED_SEL_GET(x)\
+       FIELD_GET(DEV5G_DEV_RST_CTRL_SPEED_SEL, x)
+
+#define DEV5G_DEV_RST_CTRL_PCS_TX_RST            BIT(12)
+#define DEV5G_DEV_RST_CTRL_PCS_TX_RST_SET(x)\
+       FIELD_PREP(DEV5G_DEV_RST_CTRL_PCS_TX_RST, x)
+#define DEV5G_DEV_RST_CTRL_PCS_TX_RST_GET(x)\
+       FIELD_GET(DEV5G_DEV_RST_CTRL_PCS_TX_RST, x)
+
+#define DEV5G_DEV_RST_CTRL_PCS_RX_RST            BIT(8)
+#define DEV5G_DEV_RST_CTRL_PCS_RX_RST_SET(x)\
+       FIELD_PREP(DEV5G_DEV_RST_CTRL_PCS_RX_RST, x)
+#define DEV5G_DEV_RST_CTRL_PCS_RX_RST_GET(x)\
+       FIELD_GET(DEV5G_DEV_RST_CTRL_PCS_RX_RST, x)
+
+#define DEV5G_DEV_RST_CTRL_MAC_TX_RST            BIT(4)
+#define DEV5G_DEV_RST_CTRL_MAC_TX_RST_SET(x)\
+       FIELD_PREP(DEV5G_DEV_RST_CTRL_MAC_TX_RST, x)
+#define DEV5G_DEV_RST_CTRL_MAC_TX_RST_GET(x)\
+       FIELD_GET(DEV5G_DEV_RST_CTRL_MAC_TX_RST, x)
+
+#define DEV5G_DEV_RST_CTRL_MAC_RX_RST            BIT(0)
+#define DEV5G_DEV_RST_CTRL_MAC_RX_RST_SET(x)\
+       FIELD_PREP(DEV5G_DEV_RST_CTRL_MAC_RX_RST, x)
+#define DEV5G_DEV_RST_CTRL_MAC_RX_RST_GET(x)\
+       FIELD_GET(DEV5G_DEV_RST_CTRL_MAC_RX_RST, x)
+
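+/* Usage sketch: a port is taken out of reset with one masked write to
+ * DEV_RST_CTRL, clearing the MAC/PCS reset bits while programming the
+ * speed selector.  spx5_rmw(value, mask, priv, reg) is assumed to be
+ * the driver's read-modify-write helper (hypothetical signature).
+ */
+static inline void spx5_dev5g_release_reset(struct sparx5 *sparx5, int t,
+                                           u32 speed_sel)
+{
+       spx5_rmw(DEV5G_DEV_RST_CTRL_SPEED_SEL_SET(speed_sel) |
+                DEV5G_DEV_RST_CTRL_PCS_TX_RST_SET(0) |
+                DEV5G_DEV_RST_CTRL_PCS_RX_RST_SET(0) |
+                DEV5G_DEV_RST_CTRL_MAC_TX_RST_SET(0) |
+                DEV5G_DEV_RST_CTRL_MAC_RX_RST_SET(0),
+                DEV5G_DEV_RST_CTRL_SPEED_SEL |
+                DEV5G_DEV_RST_CTRL_PCS_TX_RST |
+                DEV5G_DEV_RST_CTRL_PCS_RX_RST |
+                DEV5G_DEV_RST_CTRL_MAC_TX_RST |
+                DEV5G_DEV_RST_CTRL_MAC_RX_RST,
+                sparx5, DEV5G_DEV_RST_CTRL(t));
+}
+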
+/*      DSM:RAM_CTRL:RAM_INIT */
+#define DSM_RAM_INIT              __REG(TARGET_DSM, 0, 1, 0, 0, 1, 4, 0, 0, 1, 4)
+
+#define DSM_RAM_INIT_RAM_INIT                    BIT(1)
+#define DSM_RAM_INIT_RAM_INIT_SET(x)\
+       FIELD_PREP(DSM_RAM_INIT_RAM_INIT, x)
+#define DSM_RAM_INIT_RAM_INIT_GET(x)\
+       FIELD_GET(DSM_RAM_INIT_RAM_INIT, x)
+
+#define DSM_RAM_INIT_RAM_CFG_HOOK                BIT(0)
+#define DSM_RAM_INIT_RAM_CFG_HOOK_SET(x)\
+       FIELD_PREP(DSM_RAM_INIT_RAM_CFG_HOOK, x)
+#define DSM_RAM_INIT_RAM_CFG_HOOK_GET(x)\
+       FIELD_GET(DSM_RAM_INIT_RAM_CFG_HOOK, x)
+
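+/* Usage sketch: RAM blocks in this family are initialized by setting
+ * RAM_INIT and polling until hardware clears the bit again (assumed
+ * self-clearing semantics; spx5_wr()/spx5_rd() are assumed accessors).
+ */
+static inline void spx5_dsm_ram_init(struct sparx5 *sparx5)
+{
+       spx5_wr(DSM_RAM_INIT_RAM_INIT_SET(1), sparx5, DSM_RAM_INIT);
+       while (DSM_RAM_INIT_RAM_INIT_GET(spx5_rd(sparx5, DSM_RAM_INIT)))
+               cpu_relax();
+}
+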
+/*      DSM:CFG:BUF_CFG */
+#define DSM_BUF_CFG(r)            __REG(TARGET_DSM, 0, 1, 20, 0, 1, 3528, 0, r, 67, 4)
+
+#define DSM_BUF_CFG_CSC_STAT_DIS                 BIT(13)
+#define DSM_BUF_CFG_CSC_STAT_DIS_SET(x)\
+       FIELD_PREP(DSM_BUF_CFG_CSC_STAT_DIS, x)
+#define DSM_BUF_CFG_CSC_STAT_DIS_GET(x)\
+       FIELD_GET(DSM_BUF_CFG_CSC_STAT_DIS, x)
+
+#define DSM_BUF_CFG_AGING_ENA                    BIT(12)
+#define DSM_BUF_CFG_AGING_ENA_SET(x)\
+       FIELD_PREP(DSM_BUF_CFG_AGING_ENA, x)
+#define DSM_BUF_CFG_AGING_ENA_GET(x)\
+       FIELD_GET(DSM_BUF_CFG_AGING_ENA, x)
+
+#define DSM_BUF_CFG_UNDERFLOW_WATCHDOG_DIS       BIT(11)
+#define DSM_BUF_CFG_UNDERFLOW_WATCHDOG_DIS_SET(x)\
+       FIELD_PREP(DSM_BUF_CFG_UNDERFLOW_WATCHDOG_DIS, x)
+#define DSM_BUF_CFG_UNDERFLOW_WATCHDOG_DIS_GET(x)\
+       FIELD_GET(DSM_BUF_CFG_UNDERFLOW_WATCHDOG_DIS, x)
+
+#define DSM_BUF_CFG_UNDERFLOW_WATCHDOG_TIMEOUT   GENMASK(10, 0)
+#define DSM_BUF_CFG_UNDERFLOW_WATCHDOG_TIMEOUT_SET(x)\
+       FIELD_PREP(DSM_BUF_CFG_UNDERFLOW_WATCHDOG_TIMEOUT, x)
+#define DSM_BUF_CFG_UNDERFLOW_WATCHDOG_TIMEOUT_GET(x)\
+       FIELD_GET(DSM_BUF_CFG_UNDERFLOW_WATCHDOG_TIMEOUT, x)
+
+/*      DSM:CFG:DEV_TX_STOP_WM_CFG */
+#define DSM_DEV_TX_STOP_WM_CFG(r) __REG(TARGET_DSM, 0, 1, 20, 0, 1, 3528, 1360, r, 67, 4)
+
+#define DSM_DEV_TX_STOP_WM_CFG_FAST_STARTUP_ENA  BIT(9)
+#define DSM_DEV_TX_STOP_WM_CFG_FAST_STARTUP_ENA_SET(x)\
+       FIELD_PREP(DSM_DEV_TX_STOP_WM_CFG_FAST_STARTUP_ENA, x)
+#define DSM_DEV_TX_STOP_WM_CFG_FAST_STARTUP_ENA_GET(x)\
+       FIELD_GET(DSM_DEV_TX_STOP_WM_CFG_FAST_STARTUP_ENA, x)
+
+#define DSM_DEV_TX_STOP_WM_CFG_DEV10G_SHADOW_ENA BIT(8)
+#define DSM_DEV_TX_STOP_WM_CFG_DEV10G_SHADOW_ENA_SET(x)\
+       FIELD_PREP(DSM_DEV_TX_STOP_WM_CFG_DEV10G_SHADOW_ENA, x)
+#define DSM_DEV_TX_STOP_WM_CFG_DEV10G_SHADOW_ENA_GET(x)\
+       FIELD_GET(DSM_DEV_TX_STOP_WM_CFG_DEV10G_SHADOW_ENA, x)
+
+#define DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM    GENMASK(7, 1)
+#define DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM_SET(x)\
+       FIELD_PREP(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM, x)
+#define DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM_GET(x)\
+       FIELD_GET(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM, x)
+
+#define DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR    BIT(0)
+#define DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR_SET(x)\
+       FIELD_PREP(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR, x)
+#define DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR_GET(x)\
+       FIELD_GET(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR, x)
+
+/*      DSM:CFG:RX_PAUSE_CFG */
+#define DSM_RX_PAUSE_CFG(r)       __REG(TARGET_DSM, 0, 1, 20, 0, 1, 3528, 1628, r, 67, 4)
+
+#define DSM_RX_PAUSE_CFG_RX_PAUSE_EN             BIT(1)
+#define DSM_RX_PAUSE_CFG_RX_PAUSE_EN_SET(x)\
+       FIELD_PREP(DSM_RX_PAUSE_CFG_RX_PAUSE_EN, x)
+#define DSM_RX_PAUSE_CFG_RX_PAUSE_EN_GET(x)\
+       FIELD_GET(DSM_RX_PAUSE_CFG_RX_PAUSE_EN, x)
+
+#define DSM_RX_PAUSE_CFG_FC_OBEY_LOCAL           BIT(0)
+#define DSM_RX_PAUSE_CFG_FC_OBEY_LOCAL_SET(x)\
+       FIELD_PREP(DSM_RX_PAUSE_CFG_FC_OBEY_LOCAL, x)
+#define DSM_RX_PAUSE_CFG_FC_OBEY_LOCAL_GET(x)\
+       FIELD_GET(DSM_RX_PAUSE_CFG_FC_OBEY_LOCAL, x)
+
+/*      DSM:CFG:MAC_CFG */
+#define DSM_MAC_CFG(r)            __REG(TARGET_DSM, 0, 1, 20, 0, 1, 3528, 2432, r, 67, 4)
+
+#define DSM_MAC_CFG_TX_PAUSE_VAL                 GENMASK(31, 16)
+#define DSM_MAC_CFG_TX_PAUSE_VAL_SET(x)\
+       FIELD_PREP(DSM_MAC_CFG_TX_PAUSE_VAL, x)
+#define DSM_MAC_CFG_TX_PAUSE_VAL_GET(x)\
+       FIELD_GET(DSM_MAC_CFG_TX_PAUSE_VAL, x)
+
+#define DSM_MAC_CFG_HDX_BACKPREASSURE            BIT(2)
+#define DSM_MAC_CFG_HDX_BACKPREASSURE_SET(x)\
+       FIELD_PREP(DSM_MAC_CFG_HDX_BACKPREASSURE, x)
+#define DSM_MAC_CFG_HDX_BACKPREASSURE_GET(x)\
+       FIELD_GET(DSM_MAC_CFG_HDX_BACKPREASSURE, x)
+
+#define DSM_MAC_CFG_SEND_PAUSE_FRM_TWICE         BIT(1)
+#define DSM_MAC_CFG_SEND_PAUSE_FRM_TWICE_SET(x)\
+       FIELD_PREP(DSM_MAC_CFG_SEND_PAUSE_FRM_TWICE, x)
+#define DSM_MAC_CFG_SEND_PAUSE_FRM_TWICE_GET(x)\
+       FIELD_GET(DSM_MAC_CFG_SEND_PAUSE_FRM_TWICE, x)
+
+#define DSM_MAC_CFG_TX_PAUSE_XON_XOFF            BIT(0)
+#define DSM_MAC_CFG_TX_PAUSE_XON_XOFF_SET(x)\
+       FIELD_PREP(DSM_MAC_CFG_TX_PAUSE_XON_XOFF, x)
+#define DSM_MAC_CFG_TX_PAUSE_XON_XOFF_GET(x)\
+       FIELD_GET(DSM_MAC_CFG_TX_PAUSE_XON_XOFF, x)
+
+/*      DSM:CFG:MAC_ADDR_BASE_HIGH_CFG */
+#define DSM_MAC_ADDR_BASE_HIGH_CFG(r) __REG(TARGET_DSM, 0, 1, 20, 0, 1, 3528, 2700, r, 65, 4)
+
+#define DSM_MAC_ADDR_BASE_HIGH_CFG_MAC_ADDR_HIGH GENMASK(23, 0)
+#define DSM_MAC_ADDR_BASE_HIGH_CFG_MAC_ADDR_HIGH_SET(x)\
+       FIELD_PREP(DSM_MAC_ADDR_BASE_HIGH_CFG_MAC_ADDR_HIGH, x)
+#define DSM_MAC_ADDR_BASE_HIGH_CFG_MAC_ADDR_HIGH_GET(x)\
+       FIELD_GET(DSM_MAC_ADDR_BASE_HIGH_CFG_MAC_ADDR_HIGH, x)
+
+/*      DSM:CFG:MAC_ADDR_BASE_LOW_CFG */
+#define DSM_MAC_ADDR_BASE_LOW_CFG(r) __REG(TARGET_DSM, 0, 1, 20, 0, 1, 3528, 2960, r, 65, 4)
+
+#define DSM_MAC_ADDR_BASE_LOW_CFG_MAC_ADDR_LOW   GENMASK(23, 0)
+#define DSM_MAC_ADDR_BASE_LOW_CFG_MAC_ADDR_LOW_SET(x)\
+       FIELD_PREP(DSM_MAC_ADDR_BASE_LOW_CFG_MAC_ADDR_LOW, x)
+#define DSM_MAC_ADDR_BASE_LOW_CFG_MAC_ADDR_LOW_GET(x)\
+       FIELD_GET(DSM_MAC_ADDR_BASE_LOW_CFG_MAC_ADDR_LOW, x)
+
+/*      DSM:CFG:TAXI_CAL_CFG */
+#define DSM_TAXI_CAL_CFG(r)       __REG(TARGET_DSM, 0, 1, 20, 0, 1, 3528, 3224, r, 9, 4)
+
+#define DSM_TAXI_CAL_CFG_CAL_IDX                 GENMASK(20, 15)
+#define DSM_TAXI_CAL_CFG_CAL_IDX_SET(x)\
+       FIELD_PREP(DSM_TAXI_CAL_CFG_CAL_IDX, x)
+#define DSM_TAXI_CAL_CFG_CAL_IDX_GET(x)\
+       FIELD_GET(DSM_TAXI_CAL_CFG_CAL_IDX, x)
+
+#define DSM_TAXI_CAL_CFG_CAL_CUR_LEN             GENMASK(14, 9)
+#define DSM_TAXI_CAL_CFG_CAL_CUR_LEN_SET(x)\
+       FIELD_PREP(DSM_TAXI_CAL_CFG_CAL_CUR_LEN, x)
+#define DSM_TAXI_CAL_CFG_CAL_CUR_LEN_GET(x)\
+       FIELD_GET(DSM_TAXI_CAL_CFG_CAL_CUR_LEN, x)
+
+#define DSM_TAXI_CAL_CFG_CAL_CUR_VAL             GENMASK(8, 5)
+#define DSM_TAXI_CAL_CFG_CAL_CUR_VAL_SET(x)\
+       FIELD_PREP(DSM_TAXI_CAL_CFG_CAL_CUR_VAL, x)
+#define DSM_TAXI_CAL_CFG_CAL_CUR_VAL_GET(x)\
+       FIELD_GET(DSM_TAXI_CAL_CFG_CAL_CUR_VAL, x)
+
+#define DSM_TAXI_CAL_CFG_CAL_PGM_VAL             GENMASK(4, 1)
+#define DSM_TAXI_CAL_CFG_CAL_PGM_VAL_SET(x)\
+       FIELD_PREP(DSM_TAXI_CAL_CFG_CAL_PGM_VAL, x)
+#define DSM_TAXI_CAL_CFG_CAL_PGM_VAL_GET(x)\
+       FIELD_GET(DSM_TAXI_CAL_CFG_CAL_PGM_VAL, x)
+
+#define DSM_TAXI_CAL_CFG_CAL_PGM_ENA             BIT(0)
+#define DSM_TAXI_CAL_CFG_CAL_PGM_ENA_SET(x)\
+       FIELD_PREP(DSM_TAXI_CAL_CFG_CAL_PGM_ENA, x)
+#define DSM_TAXI_CAL_CFG_CAL_PGM_ENA_GET(x)\
+       FIELD_GET(DSM_TAXI_CAL_CFG_CAL_PGM_ENA, x)
+
+/*      EACL:POL_CFG:POL_EACL_CFG */
+#define EACL_POL_EACL_CFG         __REG(TARGET_EACL, 0, 1, 150608, 0, 1, 780, 768, 0, 1, 4)
+
+#define EACL_POL_EACL_CFG_EACL_CNT_MARKED_AS_DROPPED BIT(5)
+#define EACL_POL_EACL_CFG_EACL_CNT_MARKED_AS_DROPPED_SET(x)\
+       FIELD_PREP(EACL_POL_EACL_CFG_EACL_CNT_MARKED_AS_DROPPED, x)
+#define EACL_POL_EACL_CFG_EACL_CNT_MARKED_AS_DROPPED_GET(x)\
+       FIELD_GET(EACL_POL_EACL_CFG_EACL_CNT_MARKED_AS_DROPPED, x)
+
+#define EACL_POL_EACL_CFG_EACL_ALLOW_FP_COPY     BIT(4)
+#define EACL_POL_EACL_CFG_EACL_ALLOW_FP_COPY_SET(x)\
+       FIELD_PREP(EACL_POL_EACL_CFG_EACL_ALLOW_FP_COPY, x)
+#define EACL_POL_EACL_CFG_EACL_ALLOW_FP_COPY_GET(x)\
+       FIELD_GET(EACL_POL_EACL_CFG_EACL_ALLOW_FP_COPY, x)
+
+#define EACL_POL_EACL_CFG_EACL_ALLOW_CPU_COPY    BIT(3)
+#define EACL_POL_EACL_CFG_EACL_ALLOW_CPU_COPY_SET(x)\
+       FIELD_PREP(EACL_POL_EACL_CFG_EACL_ALLOW_CPU_COPY, x)
+#define EACL_POL_EACL_CFG_EACL_ALLOW_CPU_COPY_GET(x)\
+       FIELD_GET(EACL_POL_EACL_CFG_EACL_ALLOW_CPU_COPY, x)
+
+#define EACL_POL_EACL_CFG_EACL_FORCE_CLOSE       BIT(2)
+#define EACL_POL_EACL_CFG_EACL_FORCE_CLOSE_SET(x)\
+       FIELD_PREP(EACL_POL_EACL_CFG_EACL_FORCE_CLOSE, x)
+#define EACL_POL_EACL_CFG_EACL_FORCE_CLOSE_GET(x)\
+       FIELD_GET(EACL_POL_EACL_CFG_EACL_FORCE_CLOSE, x)
+
+#define EACL_POL_EACL_CFG_EACL_FORCE_OPEN        BIT(1)
+#define EACL_POL_EACL_CFG_EACL_FORCE_OPEN_SET(x)\
+       FIELD_PREP(EACL_POL_EACL_CFG_EACL_FORCE_OPEN, x)
+#define EACL_POL_EACL_CFG_EACL_FORCE_OPEN_GET(x)\
+       FIELD_GET(EACL_POL_EACL_CFG_EACL_FORCE_OPEN, x)
+
+#define EACL_POL_EACL_CFG_EACL_FORCE_INIT        BIT(0)
+#define EACL_POL_EACL_CFG_EACL_FORCE_INIT_SET(x)\
+       FIELD_PREP(EACL_POL_EACL_CFG_EACL_FORCE_INIT, x)
+#define EACL_POL_EACL_CFG_EACL_FORCE_INIT_GET(x)\
+       FIELD_GET(EACL_POL_EACL_CFG_EACL_FORCE_INIT, x)
+
+/*      EACL:RAM_CTRL:RAM_INIT */
+#define EACL_RAM_INIT             __REG(TARGET_EACL, 0, 1, 118736, 0, 1, 4, 0, 0, 1, 4)
+
+#define EACL_RAM_INIT_RAM_INIT                   BIT(1)
+#define EACL_RAM_INIT_RAM_INIT_SET(x)\
+       FIELD_PREP(EACL_RAM_INIT_RAM_INIT, x)
+#define EACL_RAM_INIT_RAM_INIT_GET(x)\
+       FIELD_GET(EACL_RAM_INIT_RAM_INIT, x)
+
+#define EACL_RAM_INIT_RAM_CFG_HOOK               BIT(0)
+#define EACL_RAM_INIT_RAM_CFG_HOOK_SET(x)\
+       FIELD_PREP(EACL_RAM_INIT_RAM_CFG_HOOK, x)
+#define EACL_RAM_INIT_RAM_CFG_HOOK_GET(x)\
+       FIELD_GET(EACL_RAM_INIT_RAM_CFG_HOOK, x)
+
+/*      FDMA:FDMA:FDMA_CH_ACTIVATE */
+#define FDMA_CH_ACTIVATE          __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 0, 0, 1, 4)
+
+#define FDMA_CH_ACTIVATE_CH_ACTIVATE             GENMASK(7, 0)
+#define FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(x)\
+       FIELD_PREP(FDMA_CH_ACTIVATE_CH_ACTIVATE, x)
+#define FDMA_CH_ACTIVATE_CH_ACTIVATE_GET(x)\
+       FIELD_GET(FDMA_CH_ACTIVATE_CH_ACTIVATE, x)
+
+/*      FDMA:FDMA:FDMA_CH_RELOAD */
+#define FDMA_CH_RELOAD            __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 4, 0, 1, 4)
+
+#define FDMA_CH_RELOAD_CH_RELOAD                 GENMASK(7, 0)
+#define FDMA_CH_RELOAD_CH_RELOAD_SET(x)\
+       FIELD_PREP(FDMA_CH_RELOAD_CH_RELOAD, x)
+#define FDMA_CH_RELOAD_CH_RELOAD_GET(x)\
+       FIELD_GET(FDMA_CH_RELOAD_CH_RELOAD, x)
+
+/*      FDMA:FDMA:FDMA_CH_DISABLE */
+#define FDMA_CH_DISABLE           __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 8, 0, 1, 4)
+
+#define FDMA_CH_DISABLE_CH_DISABLE               GENMASK(7, 0)
+#define FDMA_CH_DISABLE_CH_DISABLE_SET(x)\
+       FIELD_PREP(FDMA_CH_DISABLE_CH_DISABLE, x)
+#define FDMA_CH_DISABLE_CH_DISABLE_GET(x)\
+       FIELD_GET(FDMA_CH_DISABLE_CH_DISABLE, x)
+
+/*      FDMA:FDMA:FDMA_DCB_LLP */
+#define FDMA_DCB_LLP(r)           __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 52, r, 8, 4)
+
+/*      FDMA:FDMA:FDMA_DCB_LLP1 */
+#define FDMA_DCB_LLP1(r)          __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 84, r, 8, 4)
+
+/*      FDMA:FDMA:FDMA_DCB_LLP_PREV */
+#define FDMA_DCB_LLP_PREV(r)      __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 116, r, 8, 4)
+
+/*      FDMA:FDMA:FDMA_DCB_LLP_PREV1 */
+#define FDMA_DCB_LLP_PREV1(r)     __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 148, r, 8, 4)
+
+/*      FDMA:FDMA:FDMA_CH_CFG */
+#define FDMA_CH_CFG(r)            __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 224, r, 8, 4)
+
+#define FDMA_CH_CFG_CH_XTR_STATUS_MODE           BIT(7)
+#define FDMA_CH_CFG_CH_XTR_STATUS_MODE_SET(x)\
+       FIELD_PREP(FDMA_CH_CFG_CH_XTR_STATUS_MODE, x)
+#define FDMA_CH_CFG_CH_XTR_STATUS_MODE_GET(x)\
+       FIELD_GET(FDMA_CH_CFG_CH_XTR_STATUS_MODE, x)
+
+#define FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY          BIT(6)
+#define FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(x)\
+       FIELD_PREP(FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY, x)
+#define FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_GET(x)\
+       FIELD_GET(FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY, x)
+
+#define FDMA_CH_CFG_CH_INJ_PORT                  BIT(5)
+#define FDMA_CH_CFG_CH_INJ_PORT_SET(x)\
+       FIELD_PREP(FDMA_CH_CFG_CH_INJ_PORT, x)
+#define FDMA_CH_CFG_CH_INJ_PORT_GET(x)\
+       FIELD_GET(FDMA_CH_CFG_CH_INJ_PORT, x)
+
+#define FDMA_CH_CFG_CH_DCB_DB_CNT                GENMASK(4, 1)
+#define FDMA_CH_CFG_CH_DCB_DB_CNT_SET(x)\
+       FIELD_PREP(FDMA_CH_CFG_CH_DCB_DB_CNT, x)
+#define FDMA_CH_CFG_CH_DCB_DB_CNT_GET(x)\
+       FIELD_GET(FDMA_CH_CFG_CH_DCB_DB_CNT, x)
+
+#define FDMA_CH_CFG_CH_MEM                       BIT(0)
+#define FDMA_CH_CFG_CH_MEM_SET(x)\
+       FIELD_PREP(FDMA_CH_CFG_CH_MEM, x)
+#define FDMA_CH_CFG_CH_MEM_GET(x)\
+       FIELD_GET(FDMA_CH_CFG_CH_MEM, x)
+
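+/* Usage sketch: a channel is brought up by programming FDMA_CH_CFG and
+ * then setting the channel's bit in FDMA_CH_ACTIVATE.  db_cnt is the
+ * number of data blocks per DCB; spx5_wr() is an assumed accessor.
+ */
+static inline void spx5_fdma_activate_ch(struct sparx5 *sparx5, int ch,
+                                        u32 db_cnt)
+{
+       spx5_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(db_cnt) |
+               FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1),
+               sparx5, FDMA_CH_CFG(ch));
+       spx5_wr(FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(BIT(ch)),
+               sparx5, FDMA_CH_ACTIVATE);
+}
+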
+/*      FDMA:FDMA:FDMA_CH_TRANSLATE */
+#define FDMA_CH_TRANSLATE(r)      __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 256, r, 8, 4)
+
+#define FDMA_CH_TRANSLATE_OFFSET                 GENMASK(15, 0)
+#define FDMA_CH_TRANSLATE_OFFSET_SET(x)\
+       FIELD_PREP(FDMA_CH_TRANSLATE_OFFSET, x)
+#define FDMA_CH_TRANSLATE_OFFSET_GET(x)\
+       FIELD_GET(FDMA_CH_TRANSLATE_OFFSET, x)
+
+/*      FDMA:FDMA:FDMA_XTR_CFG */
+#define FDMA_XTR_CFG              __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 364, 0, 1, 4)
+
+#define FDMA_XTR_CFG_XTR_FIFO_WM                 GENMASK(15, 11)
+#define FDMA_XTR_CFG_XTR_FIFO_WM_SET(x)\
+       FIELD_PREP(FDMA_XTR_CFG_XTR_FIFO_WM, x)
+#define FDMA_XTR_CFG_XTR_FIFO_WM_GET(x)\
+       FIELD_GET(FDMA_XTR_CFG_XTR_FIFO_WM, x)
+
+#define FDMA_XTR_CFG_XTR_ARB_SAT                 GENMASK(10, 0)
+#define FDMA_XTR_CFG_XTR_ARB_SAT_SET(x)\
+       FIELD_PREP(FDMA_XTR_CFG_XTR_ARB_SAT, x)
+#define FDMA_XTR_CFG_XTR_ARB_SAT_GET(x)\
+       FIELD_GET(FDMA_XTR_CFG_XTR_ARB_SAT, x)
+
+/*      FDMA:FDMA:FDMA_PORT_CTRL */
+#define FDMA_PORT_CTRL(r)         __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 376, r, 2, 4)
+
+#define FDMA_PORT_CTRL_INJ_STOP                  BIT(4)
+#define FDMA_PORT_CTRL_INJ_STOP_SET(x)\
+       FIELD_PREP(FDMA_PORT_CTRL_INJ_STOP, x)
+#define FDMA_PORT_CTRL_INJ_STOP_GET(x)\
+       FIELD_GET(FDMA_PORT_CTRL_INJ_STOP, x)
+
+#define FDMA_PORT_CTRL_INJ_STOP_FORCE            BIT(3)
+#define FDMA_PORT_CTRL_INJ_STOP_FORCE_SET(x)\
+       FIELD_PREP(FDMA_PORT_CTRL_INJ_STOP_FORCE, x)
+#define FDMA_PORT_CTRL_INJ_STOP_FORCE_GET(x)\
+       FIELD_GET(FDMA_PORT_CTRL_INJ_STOP_FORCE, x)
+
+#define FDMA_PORT_CTRL_XTR_STOP                  BIT(2)
+#define FDMA_PORT_CTRL_XTR_STOP_SET(x)\
+       FIELD_PREP(FDMA_PORT_CTRL_XTR_STOP, x)
+#define FDMA_PORT_CTRL_XTR_STOP_GET(x)\
+       FIELD_GET(FDMA_PORT_CTRL_XTR_STOP, x)
+
+#define FDMA_PORT_CTRL_XTR_BUF_IS_EMPTY          BIT(1)
+#define FDMA_PORT_CTRL_XTR_BUF_IS_EMPTY_SET(x)\
+       FIELD_PREP(FDMA_PORT_CTRL_XTR_BUF_IS_EMPTY, x)
+#define FDMA_PORT_CTRL_XTR_BUF_IS_EMPTY_GET(x)\
+       FIELD_GET(FDMA_PORT_CTRL_XTR_BUF_IS_EMPTY, x)
+
+#define FDMA_PORT_CTRL_XTR_BUF_RST               BIT(0)
+#define FDMA_PORT_CTRL_XTR_BUF_RST_SET(x)\
+       FIELD_PREP(FDMA_PORT_CTRL_XTR_BUF_RST, x)
+#define FDMA_PORT_CTRL_XTR_BUF_RST_GET(x)\
+       FIELD_GET(FDMA_PORT_CTRL_XTR_BUF_RST, x)
+
+/*      FDMA:FDMA:FDMA_INTR_DCB */
+#define FDMA_INTR_DCB             __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 384, 0, 1, 4)
+
+#define FDMA_INTR_DCB_INTR_DCB                   GENMASK(7, 0)
+#define FDMA_INTR_DCB_INTR_DCB_SET(x)\
+       FIELD_PREP(FDMA_INTR_DCB_INTR_DCB, x)
+#define FDMA_INTR_DCB_INTR_DCB_GET(x)\
+       FIELD_GET(FDMA_INTR_DCB_INTR_DCB, x)
+
+/*      FDMA:FDMA:FDMA_INTR_DCB_ENA */
+#define FDMA_INTR_DCB_ENA         __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 388, 0, 1, 4)
+
+#define FDMA_INTR_DCB_ENA_INTR_DCB_ENA           GENMASK(7, 0)
+#define FDMA_INTR_DCB_ENA_INTR_DCB_ENA_SET(x)\
+       FIELD_PREP(FDMA_INTR_DCB_ENA_INTR_DCB_ENA, x)
+#define FDMA_INTR_DCB_ENA_INTR_DCB_ENA_GET(x)\
+       FIELD_GET(FDMA_INTR_DCB_ENA_INTR_DCB_ENA, x)
+
+/*      FDMA:FDMA:FDMA_INTR_DB */
+#define FDMA_INTR_DB              __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 392, 0, 1, 4)
+
+#define FDMA_INTR_DB_INTR_DB                     GENMASK(7, 0)
+#define FDMA_INTR_DB_INTR_DB_SET(x)\
+       FIELD_PREP(FDMA_INTR_DB_INTR_DB, x)
+#define FDMA_INTR_DB_INTR_DB_GET(x)\
+       FIELD_GET(FDMA_INTR_DB_INTR_DB, x)
+
+/*      FDMA:FDMA:FDMA_INTR_DB_ENA */
+#define FDMA_INTR_DB_ENA          __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 396, 0, 1, 4)
+
+#define FDMA_INTR_DB_ENA_INTR_DB_ENA             GENMASK(7, 0)
+#define FDMA_INTR_DB_ENA_INTR_DB_ENA_SET(x)\
+       FIELD_PREP(FDMA_INTR_DB_ENA_INTR_DB_ENA, x)
+#define FDMA_INTR_DB_ENA_INTR_DB_ENA_GET(x)\
+       FIELD_GET(FDMA_INTR_DB_ENA_INTR_DB_ENA, x)
+
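+/* Usage sketch: a doorbell interrupt handler masks the pending bits
+ * against the enables and acks them, assuming the sticky bits are
+ * write-one-to-clear (an assumption to verify against the datasheet).
+ */
+static inline u32 spx5_fdma_ack_db(struct sparx5 *sparx5)
+{
+       u32 db = spx5_rd(sparx5, FDMA_INTR_DB);
+
+       db &= spx5_rd(sparx5, FDMA_INTR_DB_ENA);
+       spx5_wr(db, sparx5, FDMA_INTR_DB);      /* W1C ack, if applicable */
+       return db;
+}
+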
+/*      FDMA:FDMA:FDMA_INTR_ERR */
+#define FDMA_INTR_ERR             __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 400, 0, 1, 4)
+
+#define FDMA_INTR_ERR_INTR_PORT_ERR              GENMASK(9, 8)
+#define FDMA_INTR_ERR_INTR_PORT_ERR_SET(x)\
+       FIELD_PREP(FDMA_INTR_ERR_INTR_PORT_ERR, x)
+#define FDMA_INTR_ERR_INTR_PORT_ERR_GET(x)\
+       FIELD_GET(FDMA_INTR_ERR_INTR_PORT_ERR, x)
+
+#define FDMA_INTR_ERR_INTR_CH_ERR                GENMASK(7, 0)
+#define FDMA_INTR_ERR_INTR_CH_ERR_SET(x)\
+       FIELD_PREP(FDMA_INTR_ERR_INTR_CH_ERR, x)
+#define FDMA_INTR_ERR_INTR_CH_ERR_GET(x)\
+       FIELD_GET(FDMA_INTR_ERR_INTR_CH_ERR, x)
+
+/*      FDMA:FDMA:FDMA_ERRORS */
+#define FDMA_ERRORS               __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 412, 0, 1, 4)
+
+#define FDMA_ERRORS_ERR_XTR_WR                   GENMASK(31, 30)
+#define FDMA_ERRORS_ERR_XTR_WR_SET(x)\
+       FIELD_PREP(FDMA_ERRORS_ERR_XTR_WR, x)
+#define FDMA_ERRORS_ERR_XTR_WR_GET(x)\
+       FIELD_GET(FDMA_ERRORS_ERR_XTR_WR, x)
+
+#define FDMA_ERRORS_ERR_XTR_OVF                  GENMASK(29, 28)
+#define FDMA_ERRORS_ERR_XTR_OVF_SET(x)\
+       FIELD_PREP(FDMA_ERRORS_ERR_XTR_OVF, x)
+#define FDMA_ERRORS_ERR_XTR_OVF_GET(x)\
+       FIELD_GET(FDMA_ERRORS_ERR_XTR_OVF, x)
+
+#define FDMA_ERRORS_ERR_XTR_TAXI32_OVF           GENMASK(27, 26)
+#define FDMA_ERRORS_ERR_XTR_TAXI32_OVF_SET(x)\
+       FIELD_PREP(FDMA_ERRORS_ERR_XTR_TAXI32_OVF, x)
+#define FDMA_ERRORS_ERR_XTR_TAXI32_OVF_GET(x)\
+       FIELD_GET(FDMA_ERRORS_ERR_XTR_TAXI32_OVF, x)
+
+#define FDMA_ERRORS_ERR_DCB_XTR_DATAL            GENMASK(25, 24)
+#define FDMA_ERRORS_ERR_DCB_XTR_DATAL_SET(x)\
+       FIELD_PREP(FDMA_ERRORS_ERR_DCB_XTR_DATAL, x)
+#define FDMA_ERRORS_ERR_DCB_XTR_DATAL_GET(x)\
+       FIELD_GET(FDMA_ERRORS_ERR_DCB_XTR_DATAL, x)
+
+#define FDMA_ERRORS_ERR_DCB_RD                   GENMASK(23, 16)
+#define FDMA_ERRORS_ERR_DCB_RD_SET(x)\
+       FIELD_PREP(FDMA_ERRORS_ERR_DCB_RD, x)
+#define FDMA_ERRORS_ERR_DCB_RD_GET(x)\
+       FIELD_GET(FDMA_ERRORS_ERR_DCB_RD, x)
+
+#define FDMA_ERRORS_ERR_INJ_RD                   GENMASK(15, 10)
+#define FDMA_ERRORS_ERR_INJ_RD_SET(x)\
+       FIELD_PREP(FDMA_ERRORS_ERR_INJ_RD, x)
+#define FDMA_ERRORS_ERR_INJ_RD_GET(x)\
+       FIELD_GET(FDMA_ERRORS_ERR_INJ_RD, x)
+
+#define FDMA_ERRORS_ERR_INJ_OUT_OF_SYNC          GENMASK(9, 8)
+#define FDMA_ERRORS_ERR_INJ_OUT_OF_SYNC_SET(x)\
+       FIELD_PREP(FDMA_ERRORS_ERR_INJ_OUT_OF_SYNC, x)
+#define FDMA_ERRORS_ERR_INJ_OUT_OF_SYNC_GET(x)\
+       FIELD_GET(FDMA_ERRORS_ERR_INJ_OUT_OF_SYNC, x)
+
+#define FDMA_ERRORS_ERR_CH_WR                    GENMASK(7, 0)
+#define FDMA_ERRORS_ERR_CH_WR_SET(x)\
+       FIELD_PREP(FDMA_ERRORS_ERR_CH_WR, x)
+#define FDMA_ERRORS_ERR_CH_WR_GET(x)\
+       FIELD_GET(FDMA_ERRORS_ERR_CH_WR, x)
+
+/*      FDMA:FDMA:FDMA_ERRORS_2 */
+#define FDMA_ERRORS_2             __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 416, 0, 1, 4)
+
+#define FDMA_ERRORS_2_ERR_XTR_FRAG               GENMASK(1, 0)
+#define FDMA_ERRORS_2_ERR_XTR_FRAG_SET(x)\
+       FIELD_PREP(FDMA_ERRORS_2_ERR_XTR_FRAG, x)
+#define FDMA_ERRORS_2_ERR_XTR_FRAG_GET(x)\
+       FIELD_GET(FDMA_ERRORS_2_ERR_XTR_FRAG, x)
+
+/*      FDMA:FDMA:FDMA_CTRL */
+#define FDMA_CTRL                 __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 424, 0, 1, 4)
+
+#define FDMA_CTRL_NRESET                         BIT(0)
+#define FDMA_CTRL_NRESET_SET(x)\
+       FIELD_PREP(FDMA_CTRL_NRESET, x)
+#define FDMA_CTRL_NRESET_GET(x)\
+       FIELD_GET(FDMA_CTRL_NRESET, x)
+
+/*      DEVCPU_GCB:CHIP_REGS:CHIP_ID */
+#define GCB_CHIP_ID               __REG(TARGET_GCB, 0, 1, 0, 0, 1, 424, 0, 0, 1, 4)
+
+#define GCB_CHIP_ID_REV_ID                       GENMASK(31, 28)
+#define GCB_CHIP_ID_REV_ID_SET(x)\
+       FIELD_PREP(GCB_CHIP_ID_REV_ID, x)
+#define GCB_CHIP_ID_REV_ID_GET(x)\
+       FIELD_GET(GCB_CHIP_ID_REV_ID, x)
+
+#define GCB_CHIP_ID_PART_ID                      GENMASK(27, 12)
+#define GCB_CHIP_ID_PART_ID_SET(x)\
+       FIELD_PREP(GCB_CHIP_ID_PART_ID, x)
+#define GCB_CHIP_ID_PART_ID_GET(x)\
+       FIELD_GET(GCB_CHIP_ID_PART_ID, x)
+
+#define GCB_CHIP_ID_MFG_ID                       GENMASK(11, 1)
+#define GCB_CHIP_ID_MFG_ID_SET(x)\
+       FIELD_PREP(GCB_CHIP_ID_MFG_ID, x)
+#define GCB_CHIP_ID_MFG_ID_GET(x)\
+       FIELD_GET(GCB_CHIP_ID_MFG_ID, x)
+
+#define GCB_CHIP_ID_ONE                          BIT(0)
+#define GCB_CHIP_ID_ONE_SET(x)\
+       FIELD_PREP(GCB_CHIP_ID_ONE, x)
+#define GCB_CHIP_ID_ONE_GET(x)\
+       FIELD_GET(GCB_CHIP_ID_ONE, x)
+
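+/* Usage sketch: CHIP_ID packs revision, part and manufacturer IDs into
+ * one word; the _GET() helpers pull the fields apart (spx5_rd() is an
+ * assumed accessor).
+ */
+static inline void spx5_log_chip_id(struct sparx5 *sparx5)
+{
+       u32 id = spx5_rd(sparx5, GCB_CHIP_ID);
+
+       pr_info("rev %lu part 0x%04lx mfg 0x%03lx\n",
+               GCB_CHIP_ID_REV_ID_GET(id),
+               GCB_CHIP_ID_PART_ID_GET(id),
+               GCB_CHIP_ID_MFG_ID_GET(id));
+}
+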
+/*      DEVCPU_GCB:CHIP_REGS:SOFT_RST */
+#define GCB_SOFT_RST              __REG(TARGET_GCB, 0, 1, 0, 0, 1, 424, 8, 0, 1, 4)
+
+#define GCB_SOFT_RST_SOFT_NON_CFG_RST            BIT(2)
+#define GCB_SOFT_RST_SOFT_NON_CFG_RST_SET(x)\
+       FIELD_PREP(GCB_SOFT_RST_SOFT_NON_CFG_RST, x)
+#define GCB_SOFT_RST_SOFT_NON_CFG_RST_GET(x)\
+       FIELD_GET(GCB_SOFT_RST_SOFT_NON_CFG_RST, x)
+
+#define GCB_SOFT_RST_SOFT_SWC_RST                BIT(1)
+#define GCB_SOFT_RST_SOFT_SWC_RST_SET(x)\
+       FIELD_PREP(GCB_SOFT_RST_SOFT_SWC_RST, x)
+#define GCB_SOFT_RST_SOFT_SWC_RST_GET(x)\
+       FIELD_GET(GCB_SOFT_RST_SOFT_SWC_RST, x)
+
+#define GCB_SOFT_RST_SOFT_CHIP_RST               BIT(0)
+#define GCB_SOFT_RST_SOFT_CHIP_RST_SET(x)\
+       FIELD_PREP(GCB_SOFT_RST_SOFT_CHIP_RST, x)
+#define GCB_SOFT_RST_SOFT_CHIP_RST_GET(x)\
+       FIELD_GET(GCB_SOFT_RST_SOFT_CHIP_RST, x)
+
+/*      DEVCPU_GCB:CHIP_REGS:HW_SGPIO_SD_CFG */
+#define GCB_HW_SGPIO_SD_CFG       __REG(TARGET_GCB, 0, 1, 0, 0, 1, 424, 20, 0, 1, 4)
+
+#define GCB_HW_SGPIO_SD_CFG_SD_HIGH_ENA          BIT(1)
+#define GCB_HW_SGPIO_SD_CFG_SD_HIGH_ENA_SET(x)\
+       FIELD_PREP(GCB_HW_SGPIO_SD_CFG_SD_HIGH_ENA, x)
+#define GCB_HW_SGPIO_SD_CFG_SD_HIGH_ENA_GET(x)\
+       FIELD_GET(GCB_HW_SGPIO_SD_CFG_SD_HIGH_ENA, x)
+
+#define GCB_HW_SGPIO_SD_CFG_SD_MAP_SEL           BIT(0)
+#define GCB_HW_SGPIO_SD_CFG_SD_MAP_SEL_SET(x)\
+       FIELD_PREP(GCB_HW_SGPIO_SD_CFG_SD_MAP_SEL, x)
+#define GCB_HW_SGPIO_SD_CFG_SD_MAP_SEL_GET(x)\
+       FIELD_GET(GCB_HW_SGPIO_SD_CFG_SD_MAP_SEL, x)
+
+/*      DEVCPU_GCB:CHIP_REGS:HW_SGPIO_TO_SD_MAP_CFG */
+#define GCB_HW_SGPIO_TO_SD_MAP_CFG(r) __REG(TARGET_GCB, 0, 1, 0, 0, 1, 424, 24, r, 65, 4)
+
+#define GCB_HW_SGPIO_TO_SD_MAP_CFG_SGPIO_TO_SD_SEL GENMASK(8, 0)
+#define GCB_HW_SGPIO_TO_SD_MAP_CFG_SGPIO_TO_SD_SEL_SET(x)\
+       FIELD_PREP(GCB_HW_SGPIO_TO_SD_MAP_CFG_SGPIO_TO_SD_SEL, x)
+#define GCB_HW_SGPIO_TO_SD_MAP_CFG_SGPIO_TO_SD_SEL_GET(x)\
+       FIELD_GET(GCB_HW_SGPIO_TO_SD_MAP_CFG_SGPIO_TO_SD_SEL, x)
+
+/*      DEVCPU_GCB:SIO_CTRL:SIO_CLOCK */
+#define GCB_SIO_CLOCK(g)          __REG(TARGET_GCB, 0, 1, 876, g, 3, 280, 20, 0, 1, 4)
+
+#define GCB_SIO_CLOCK_SIO_CLK_FREQ               GENMASK(19, 8)
+#define GCB_SIO_CLOCK_SIO_CLK_FREQ_SET(x)\
+       FIELD_PREP(GCB_SIO_CLOCK_SIO_CLK_FREQ, x)
+#define GCB_SIO_CLOCK_SIO_CLK_FREQ_GET(x)\
+       FIELD_GET(GCB_SIO_CLOCK_SIO_CLK_FREQ, x)
+
+#define GCB_SIO_CLOCK_SYS_CLK_PERIOD             GENMASK(7, 0)
+#define GCB_SIO_CLOCK_SYS_CLK_PERIOD_SET(x)\
+       FIELD_PREP(GCB_SIO_CLOCK_SYS_CLK_PERIOD, x)
+#define GCB_SIO_CLOCK_SYS_CLK_PERIOD_GET(x)\
+       FIELD_GET(GCB_SIO_CLOCK_SYS_CLK_PERIOD, x)
+
+/*      HSCH:HSCH_MISC:SYS_CLK_PER */
+#define HSCH_SYS_CLK_PER          __REG(TARGET_HSCH, 0, 1, 163104, 0, 1, 648, 640, 0, 1, 4)
+
+#define HSCH_SYS_CLK_PER_SYS_CLK_PER_100PS       GENMASK(7, 0)
+#define HSCH_SYS_CLK_PER_SYS_CLK_PER_100PS_SET(x)\
+       FIELD_PREP(HSCH_SYS_CLK_PER_SYS_CLK_PER_100PS, x)
+#define HSCH_SYS_CLK_PER_SYS_CLK_PER_100PS_GET(x)\
+       FIELD_GET(HSCH_SYS_CLK_PER_SYS_CLK_PER_100PS, x)
+
+/*      HSCH:SYSTEM:FLUSH_CTRL */
+#define HSCH_FLUSH_CTRL           __REG(TARGET_HSCH, 0, 1, 184000, 0, 1, 312, 4, 0, 1, 4)
+
+#define HSCH_FLUSH_CTRL_FLUSH_ENA                BIT(27)
+#define HSCH_FLUSH_CTRL_FLUSH_ENA_SET(x)\
+       FIELD_PREP(HSCH_FLUSH_CTRL_FLUSH_ENA, x)
+#define HSCH_FLUSH_CTRL_FLUSH_ENA_GET(x)\
+       FIELD_GET(HSCH_FLUSH_CTRL_FLUSH_ENA, x)
+
+#define HSCH_FLUSH_CTRL_FLUSH_SRC                BIT(26)
+#define HSCH_FLUSH_CTRL_FLUSH_SRC_SET(x)\
+       FIELD_PREP(HSCH_FLUSH_CTRL_FLUSH_SRC, x)
+#define HSCH_FLUSH_CTRL_FLUSH_SRC_GET(x)\
+       FIELD_GET(HSCH_FLUSH_CTRL_FLUSH_SRC, x)
+
+#define HSCH_FLUSH_CTRL_FLUSH_DST                BIT(25)
+#define HSCH_FLUSH_CTRL_FLUSH_DST_SET(x)\
+       FIELD_PREP(HSCH_FLUSH_CTRL_FLUSH_DST, x)
+#define HSCH_FLUSH_CTRL_FLUSH_DST_GET(x)\
+       FIELD_GET(HSCH_FLUSH_CTRL_FLUSH_DST, x)
+
+#define HSCH_FLUSH_CTRL_FLUSH_PORT               GENMASK(24, 18)
+#define HSCH_FLUSH_CTRL_FLUSH_PORT_SET(x)\
+       FIELD_PREP(HSCH_FLUSH_CTRL_FLUSH_PORT, x)
+#define HSCH_FLUSH_CTRL_FLUSH_PORT_GET(x)\
+       FIELD_GET(HSCH_FLUSH_CTRL_FLUSH_PORT, x)
+
+#define HSCH_FLUSH_CTRL_FLUSH_QUEUE              BIT(17)
+#define HSCH_FLUSH_CTRL_FLUSH_QUEUE_SET(x)\
+       FIELD_PREP(HSCH_FLUSH_CTRL_FLUSH_QUEUE, x)
+#define HSCH_FLUSH_CTRL_FLUSH_QUEUE_GET(x)\
+       FIELD_GET(HSCH_FLUSH_CTRL_FLUSH_QUEUE, x)
+
+#define HSCH_FLUSH_CTRL_FLUSH_SE                 BIT(16)
+#define HSCH_FLUSH_CTRL_FLUSH_SE_SET(x)\
+       FIELD_PREP(HSCH_FLUSH_CTRL_FLUSH_SE, x)
+#define HSCH_FLUSH_CTRL_FLUSH_SE_GET(x)\
+       FIELD_GET(HSCH_FLUSH_CTRL_FLUSH_SE, x)
+
+#define HSCH_FLUSH_CTRL_FLUSH_HIER               GENMASK(15, 0)
+#define HSCH_FLUSH_CTRL_FLUSH_HIER_SET(x)\
+       FIELD_PREP(HSCH_FLUSH_CTRL_FLUSH_HIER, x)
+#define HSCH_FLUSH_CTRL_FLUSH_HIER_GET(x)\
+       FIELD_GET(HSCH_FLUSH_CTRL_FLUSH_HIER, x)
+
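+/* Usage sketch: flushing all queues to and from one port combines the
+ * port number, the source/destination selects and the enable bit in a
+ * single masked write (spx5_rmw() is an assumed accessor).
+ */
+static inline void spx5_hsch_flush_port(struct sparx5 *sparx5, u32 port)
+{
+       spx5_rmw(HSCH_FLUSH_CTRL_FLUSH_PORT_SET(port) |
+                HSCH_FLUSH_CTRL_FLUSH_DST_SET(1) |
+                HSCH_FLUSH_CTRL_FLUSH_SRC_SET(1) |
+                HSCH_FLUSH_CTRL_FLUSH_ENA_SET(1),
+                HSCH_FLUSH_CTRL_FLUSH_PORT |
+                HSCH_FLUSH_CTRL_FLUSH_DST |
+                HSCH_FLUSH_CTRL_FLUSH_SRC |
+                HSCH_FLUSH_CTRL_FLUSH_ENA,
+                sparx5, HSCH_FLUSH_CTRL);
+}
+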
+/*      HSCH:SYSTEM:PORT_MODE */
+#define HSCH_PORT_MODE(r)         __REG(TARGET_HSCH, 0, 1, 184000, 0, 1, 312, 8, r, 70, 4)
+
+#define HSCH_PORT_MODE_DEQUEUE_DIS               BIT(4)
+#define HSCH_PORT_MODE_DEQUEUE_DIS_SET(x)\
+       FIELD_PREP(HSCH_PORT_MODE_DEQUEUE_DIS, x)
+#define HSCH_PORT_MODE_DEQUEUE_DIS_GET(x)\
+       FIELD_GET(HSCH_PORT_MODE_DEQUEUE_DIS, x)
+
+#define HSCH_PORT_MODE_AGE_DIS                   BIT(3)
+#define HSCH_PORT_MODE_AGE_DIS_SET(x)\
+       FIELD_PREP(HSCH_PORT_MODE_AGE_DIS, x)
+#define HSCH_PORT_MODE_AGE_DIS_GET(x)\
+       FIELD_GET(HSCH_PORT_MODE_AGE_DIS, x)
+
+#define HSCH_PORT_MODE_TRUNC_ENA                 BIT(2)
+#define HSCH_PORT_MODE_TRUNC_ENA_SET(x)\
+       FIELD_PREP(HSCH_PORT_MODE_TRUNC_ENA, x)
+#define HSCH_PORT_MODE_TRUNC_ENA_GET(x)\
+       FIELD_GET(HSCH_PORT_MODE_TRUNC_ENA, x)
+
+#define HSCH_PORT_MODE_EIR_REMARK_ENA            BIT(1)
+#define HSCH_PORT_MODE_EIR_REMARK_ENA_SET(x)\
+       FIELD_PREP(HSCH_PORT_MODE_EIR_REMARK_ENA, x)
+#define HSCH_PORT_MODE_EIR_REMARK_ENA_GET(x)\
+       FIELD_GET(HSCH_PORT_MODE_EIR_REMARK_ENA, x)
+
+#define HSCH_PORT_MODE_CPU_PRIO_MODE             BIT(0)
+#define HSCH_PORT_MODE_CPU_PRIO_MODE_SET(x)\
+       FIELD_PREP(HSCH_PORT_MODE_CPU_PRIO_MODE, x)
+#define HSCH_PORT_MODE_CPU_PRIO_MODE_GET(x)\
+       FIELD_GET(HSCH_PORT_MODE_CPU_PRIO_MODE, x)
+
+/*      HSCH:SYSTEM:OUTB_SHARE_ENA */
+#define HSCH_OUTB_SHARE_ENA(r)    __REG(TARGET_HSCH, 0, 1, 184000, 0, 1, 312, 288, r, 5, 4)
+
+#define HSCH_OUTB_SHARE_ENA_OUTB_SHARE_ENA       GENMASK(7, 0)
+#define HSCH_OUTB_SHARE_ENA_OUTB_SHARE_ENA_SET(x)\
+       FIELD_PREP(HSCH_OUTB_SHARE_ENA_OUTB_SHARE_ENA, x)
+#define HSCH_OUTB_SHARE_ENA_OUTB_SHARE_ENA_GET(x)\
+       FIELD_GET(HSCH_OUTB_SHARE_ENA_OUTB_SHARE_ENA, x)
+
+/*      HSCH:MMGT:RESET_CFG */
+#define HSCH_RESET_CFG            __REG(TARGET_HSCH, 0, 1, 162368, 0, 1, 16, 8, 0, 1, 4)
+
+#define HSCH_RESET_CFG_CORE_ENA                  BIT(0)
+#define HSCH_RESET_CFG_CORE_ENA_SET(x)\
+       FIELD_PREP(HSCH_RESET_CFG_CORE_ENA, x)
+#define HSCH_RESET_CFG_CORE_ENA_GET(x)\
+       FIELD_GET(HSCH_RESET_CFG_CORE_ENA, x)
+
+/*      HSCH:TAS_CONFIG:TAS_STATEMACHINE_CFG */
+#define HSCH_TAS_STATEMACHINE_CFG __REG(TARGET_HSCH, 0, 1, 162384, 0, 1, 12, 8, 0, 1, 4)
+
+#define HSCH_TAS_STATEMACHINE_CFG_REVISIT_DLY    GENMASK(7, 0)
+#define HSCH_TAS_STATEMACHINE_CFG_REVISIT_DLY_SET(x)\
+       FIELD_PREP(HSCH_TAS_STATEMACHINE_CFG_REVISIT_DLY, x)
+#define HSCH_TAS_STATEMACHINE_CFG_REVISIT_DLY_GET(x)\
+       FIELD_GET(HSCH_TAS_STATEMACHINE_CFG_REVISIT_DLY, x)
+
+/*      LRN:COMMON:COMMON_ACCESS_CTRL */
+#define LRN_COMMON_ACCESS_CTRL    __REG(TARGET_LRN, 0, 1, 0, 0, 1, 72, 0, 0, 1, 4)
+
+#define LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_COL GENMASK(21, 20)
+#define LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_COL_SET(x)\
+       FIELD_PREP(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_COL, x)
+#define LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_COL_GET(x)\
+       FIELD_GET(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_COL, x)
+
+#define LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_TYPE BIT(19)
+#define LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_TYPE_SET(x)\
+       FIELD_PREP(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_TYPE, x)
+#define LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_TYPE_GET(x)\
+       FIELD_GET(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_TYPE, x)
+
+#define LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_ROW GENMASK(18, 5)
+#define LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_ROW_SET(x)\
+       FIELD_PREP(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_ROW, x)
+#define LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_ROW_GET(x)\
+       FIELD_GET(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_ROW, x)
+
+#define LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_CMD    GENMASK(4, 1)
+#define LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_CMD_SET(x)\
+       FIELD_PREP(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_CMD, x)
+#define LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_CMD_GET(x)\
+       FIELD_GET(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_CMD, x)
+
+#define LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT BIT(0)
+#define LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT_SET(x)\
+       FIELD_PREP(LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT, x)
+#define LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT_GET(x)\
+       FIELD_GET(LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT, x)
+
+/*      LRN:COMMON:MAC_ACCESS_CFG_0 */
+#define LRN_MAC_ACCESS_CFG_0      __REG(TARGET_LRN, 0, 1, 0, 0, 1, 72, 4, 0, 1, 4)
+
+#define LRN_MAC_ACCESS_CFG_0_MAC_ENTRY_FID       GENMASK(28, 16)
+#define LRN_MAC_ACCESS_CFG_0_MAC_ENTRY_FID_SET(x)\
+       FIELD_PREP(LRN_MAC_ACCESS_CFG_0_MAC_ENTRY_FID, x)
+#define LRN_MAC_ACCESS_CFG_0_MAC_ENTRY_FID_GET(x)\
+       FIELD_GET(LRN_MAC_ACCESS_CFG_0_MAC_ENTRY_FID, x)
+
+#define LRN_MAC_ACCESS_CFG_0_MAC_ENTRY_MAC_MSB   GENMASK(15, 0)
+#define LRN_MAC_ACCESS_CFG_0_MAC_ENTRY_MAC_MSB_SET(x)\
+       FIELD_PREP(LRN_MAC_ACCESS_CFG_0_MAC_ENTRY_MAC_MSB, x)
+#define LRN_MAC_ACCESS_CFG_0_MAC_ENTRY_MAC_MSB_GET(x)\
+       FIELD_GET(LRN_MAC_ACCESS_CFG_0_MAC_ENTRY_MAC_MSB, x)
+
+/*      LRN:COMMON:MAC_ACCESS_CFG_1 */
+#define LRN_MAC_ACCESS_CFG_1      __REG(TARGET_LRN, 0, 1, 0, 0, 1, 72, 8, 0, 1, 4)
+
+/*      LRN:COMMON:MAC_ACCESS_CFG_2 */
+#define LRN_MAC_ACCESS_CFG_2      __REG(TARGET_LRN, 0, 1, 0, 0, 1, 72, 12, 0, 1, 4)
+
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_SRC_KILL_FWD BIT(28)
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_SRC_KILL_FWD_SET(x)\
+       FIELD_PREP(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_SRC_KILL_FWD, x)
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_SRC_KILL_FWD_GET(x)\
+       FIELD_GET(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_SRC_KILL_FWD, x)
+
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_NXT_LRN_ALL BIT(27)
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_NXT_LRN_ALL_SET(x)\
+       FIELD_PREP(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_NXT_LRN_ALL, x)
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_NXT_LRN_ALL_GET(x)\
+       FIELD_GET(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_NXT_LRN_ALL, x)
+
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_CPU_QU    GENMASK(26, 24)
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_CPU_QU_SET(x)\
+       FIELD_PREP(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_CPU_QU, x)
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_CPU_QU_GET(x)\
+       FIELD_GET(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_CPU_QU, x)
+
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_CPU_COPY  BIT(23)
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_CPU_COPY_SET(x)\
+       FIELD_PREP(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_CPU_COPY, x)
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_CPU_COPY_GET(x)\
+       FIELD_GET(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_CPU_COPY, x)
+
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_VLAN_IGNORE BIT(22)
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_VLAN_IGNORE_SET(x)\
+       FIELD_PREP(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_VLAN_IGNORE, x)
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_VLAN_IGNORE_GET(x)\
+       FIELD_GET(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_VLAN_IGNORE, x)
+
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_MIRROR    BIT(21)
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_MIRROR_SET(x)\
+       FIELD_PREP(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_MIRROR, x)
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_MIRROR_GET(x)\
+       FIELD_GET(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_MIRROR, x)
+
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_AGE_FLAG  GENMASK(20, 19)
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_AGE_FLAG_SET(x)\
+       FIELD_PREP(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_AGE_FLAG, x)
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_AGE_FLAG_GET(x)\
+       FIELD_GET(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_AGE_FLAG, x)
+
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_AGE_INTERVAL GENMASK(18, 17)
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_AGE_INTERVAL_SET(x)\
+       FIELD_PREP(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_AGE_INTERVAL, x)
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_AGE_INTERVAL_GET(x)\
+       FIELD_GET(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_AGE_INTERVAL, x)
+
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_LOCKED    BIT(16)
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_LOCKED_SET(x)\
+       FIELD_PREP(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_LOCKED, x)
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_LOCKED_GET(x)\
+       FIELD_GET(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_LOCKED, x)
+
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_VLD       BIT(15)
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_VLD_SET(x)\
+       FIELD_PREP(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_VLD, x)
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_VLD_GET(x)\
+       FIELD_GET(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_VLD, x)
+
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_TYPE GENMASK(14, 12)
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_TYPE_SET(x)\
+       FIELD_PREP(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_TYPE, x)
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_TYPE_GET(x)\
+       FIELD_GET(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_TYPE, x)
+
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR      GENMASK(11, 0)
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_SET(x)\
+       FIELD_PREP(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR, x)
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_GET(x)\
+       FIELD_GET(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR, x)
+
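+/* Usage sketch: a MAC table operation loads the entry into
+ * MAC_ACCESS_CFG_0..2, writes the command opcode together with the
+ * one-shot bit into COMMON_ACCESS_CTRL, and polls until hardware clears
+ * the shot bit.  The opcode values come from the datasheet, not this
+ * header; spx5_wr()/spx5_rd() are assumed accessors.
+ */
+static inline void spx5_mact_issue_cmd(struct sparx5 *sparx5, u32 cmd)
+{
+       spx5_wr(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_CMD_SET(cmd) |
+               LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT_SET(1),
+               sparx5, LRN_COMMON_ACCESS_CTRL);
+       while (LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT_GET
+              (spx5_rd(sparx5, LRN_COMMON_ACCESS_CTRL)))
+               cpu_relax();
+}
+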
+/*      LRN:COMMON:MAC_ACCESS_CFG_3 */
+#define LRN_MAC_ACCESS_CFG_3      __REG(TARGET_LRN, 0, 1, 0, 0, 1, 72, 16, 0, 1, 4)
+
+#define LRN_MAC_ACCESS_CFG_3_MAC_ENTRY_ISDX_LIMIT_IDX GENMASK(10, 0)
+#define LRN_MAC_ACCESS_CFG_3_MAC_ENTRY_ISDX_LIMIT_IDX_SET(x)\
+       FIELD_PREP(LRN_MAC_ACCESS_CFG_3_MAC_ENTRY_ISDX_LIMIT_IDX, x)
+#define LRN_MAC_ACCESS_CFG_3_MAC_ENTRY_ISDX_LIMIT_IDX_GET(x)\
+       FIELD_GET(LRN_MAC_ACCESS_CFG_3_MAC_ENTRY_ISDX_LIMIT_IDX, x)
+
+/*      LRN:COMMON:SCAN_NEXT_CFG */
+#define LRN_SCAN_NEXT_CFG         __REG(TARGET_LRN, 0, 1, 0, 0, 1, 72, 20, 0, 1, 4)
+
+#define LRN_SCAN_NEXT_CFG_SCAN_AGE_FLAG_UPDATE_SEL GENMASK(21, 19)
+#define LRN_SCAN_NEXT_CFG_SCAN_AGE_FLAG_UPDATE_SEL_SET(x)\
+       FIELD_PREP(LRN_SCAN_NEXT_CFG_SCAN_AGE_FLAG_UPDATE_SEL, x)
+#define LRN_SCAN_NEXT_CFG_SCAN_AGE_FLAG_UPDATE_SEL_GET(x)\
+       FIELD_GET(LRN_SCAN_NEXT_CFG_SCAN_AGE_FLAG_UPDATE_SEL, x)
+
+#define LRN_SCAN_NEXT_CFG_SCAN_NXT_LRN_ALL_UPDATE_SEL GENMASK(18, 17)
+#define LRN_SCAN_NEXT_CFG_SCAN_NXT_LRN_ALL_UPDATE_SEL_SET(x)\
+       FIELD_PREP(LRN_SCAN_NEXT_CFG_SCAN_NXT_LRN_ALL_UPDATE_SEL, x)
+#define LRN_SCAN_NEXT_CFG_SCAN_NXT_LRN_ALL_UPDATE_SEL_GET(x)\
+       FIELD_GET(LRN_SCAN_NEXT_CFG_SCAN_NXT_LRN_ALL_UPDATE_SEL, x)
+
+#define LRN_SCAN_NEXT_CFG_SCAN_AGE_FILTER_SEL    GENMASK(16, 15)
+#define LRN_SCAN_NEXT_CFG_SCAN_AGE_FILTER_SEL_SET(x)\
+       FIELD_PREP(LRN_SCAN_NEXT_CFG_SCAN_AGE_FILTER_SEL, x)
+#define LRN_SCAN_NEXT_CFG_SCAN_AGE_FILTER_SEL_GET(x)\
+       FIELD_GET(LRN_SCAN_NEXT_CFG_SCAN_AGE_FILTER_SEL, x)
+
+#define LRN_SCAN_NEXT_CFG_SCAN_NEXT_MOVE_FOUND_ENA BIT(14)
+#define LRN_SCAN_NEXT_CFG_SCAN_NEXT_MOVE_FOUND_ENA_SET(x)\
+       FIELD_PREP(LRN_SCAN_NEXT_CFG_SCAN_NEXT_MOVE_FOUND_ENA, x)
+#define LRN_SCAN_NEXT_CFG_SCAN_NEXT_MOVE_FOUND_ENA_GET(x)\
+       FIELD_GET(LRN_SCAN_NEXT_CFG_SCAN_NEXT_MOVE_FOUND_ENA, x)
+
+#define LRN_SCAN_NEXT_CFG_NXT_LRN_ALL_FILTER_ENA BIT(13)
+#define LRN_SCAN_NEXT_CFG_NXT_LRN_ALL_FILTER_ENA_SET(x)\
+       FIELD_PREP(LRN_SCAN_NEXT_CFG_NXT_LRN_ALL_FILTER_ENA, x)
+#define LRN_SCAN_NEXT_CFG_NXT_LRN_ALL_FILTER_ENA_GET(x)\
+       FIELD_GET(LRN_SCAN_NEXT_CFG_NXT_LRN_ALL_FILTER_ENA, x)
+
+#define LRN_SCAN_NEXT_CFG_SCAN_USE_PORT_FILTER_ENA BIT(12)
+#define LRN_SCAN_NEXT_CFG_SCAN_USE_PORT_FILTER_ENA_SET(x)\
+       FIELD_PREP(LRN_SCAN_NEXT_CFG_SCAN_USE_PORT_FILTER_ENA, x)
+#define LRN_SCAN_NEXT_CFG_SCAN_USE_PORT_FILTER_ENA_GET(x)\
+       FIELD_GET(LRN_SCAN_NEXT_CFG_SCAN_USE_PORT_FILTER_ENA, x)
+
+#define LRN_SCAN_NEXT_CFG_SCAN_NEXT_REMOVE_FOUND_ENA BIT(11)
+#define LRN_SCAN_NEXT_CFG_SCAN_NEXT_REMOVE_FOUND_ENA_SET(x)\
+       FIELD_PREP(LRN_SCAN_NEXT_CFG_SCAN_NEXT_REMOVE_FOUND_ENA, x)
+#define LRN_SCAN_NEXT_CFG_SCAN_NEXT_REMOVE_FOUND_ENA_GET(x)\
+       FIELD_GET(LRN_SCAN_NEXT_CFG_SCAN_NEXT_REMOVE_FOUND_ENA, x)
+
+#define LRN_SCAN_NEXT_CFG_SCAN_NEXT_UNTIL_FOUND_ENA BIT(10)
+#define LRN_SCAN_NEXT_CFG_SCAN_NEXT_UNTIL_FOUND_ENA_SET(x)\
+       FIELD_PREP(LRN_SCAN_NEXT_CFG_SCAN_NEXT_UNTIL_FOUND_ENA, x)
+#define LRN_SCAN_NEXT_CFG_SCAN_NEXT_UNTIL_FOUND_ENA_GET(x)\
+       FIELD_GET(LRN_SCAN_NEXT_CFG_SCAN_NEXT_UNTIL_FOUND_ENA, x)
+
+#define LRN_SCAN_NEXT_CFG_SCAN_NEXT_INC_AGE_BITS_ENA BIT(9)
+#define LRN_SCAN_NEXT_CFG_SCAN_NEXT_INC_AGE_BITS_ENA_SET(x)\
+       FIELD_PREP(LRN_SCAN_NEXT_CFG_SCAN_NEXT_INC_AGE_BITS_ENA, x)
+#define LRN_SCAN_NEXT_CFG_SCAN_NEXT_INC_AGE_BITS_ENA_GET(x)\
+       FIELD_GET(LRN_SCAN_NEXT_CFG_SCAN_NEXT_INC_AGE_BITS_ENA, x)
+
+#define LRN_SCAN_NEXT_CFG_SCAN_NEXT_AGED_ONLY_ENA BIT(8)
+#define LRN_SCAN_NEXT_CFG_SCAN_NEXT_AGED_ONLY_ENA_SET(x)\
+       FIELD_PREP(LRN_SCAN_NEXT_CFG_SCAN_NEXT_AGED_ONLY_ENA, x)
+#define LRN_SCAN_NEXT_CFG_SCAN_NEXT_AGED_ONLY_ENA_GET(x)\
+       FIELD_GET(LRN_SCAN_NEXT_CFG_SCAN_NEXT_AGED_ONLY_ENA, x)
+
+#define LRN_SCAN_NEXT_CFG_SCAN_NEXT_IGNORE_LOCKED_ENA BIT(7)
+#define LRN_SCAN_NEXT_CFG_SCAN_NEXT_IGNORE_LOCKED_ENA_SET(x)\
+       FIELD_PREP(LRN_SCAN_NEXT_CFG_SCAN_NEXT_IGNORE_LOCKED_ENA, x)
+#define LRN_SCAN_NEXT_CFG_SCAN_NEXT_IGNORE_LOCKED_ENA_GET(x)\
+       FIELD_GET(LRN_SCAN_NEXT_CFG_SCAN_NEXT_IGNORE_LOCKED_ENA, x)
+
+#define LRN_SCAN_NEXT_CFG_SCAN_AGE_INTERVAL_MASK GENMASK(6, 3)
+#define LRN_SCAN_NEXT_CFG_SCAN_AGE_INTERVAL_MASK_SET(x)\
+       FIELD_PREP(LRN_SCAN_NEXT_CFG_SCAN_AGE_INTERVAL_MASK, x)
+#define LRN_SCAN_NEXT_CFG_SCAN_AGE_INTERVAL_MASK_GET(x)\
+       FIELD_GET(LRN_SCAN_NEXT_CFG_SCAN_AGE_INTERVAL_MASK, x)
+
+#define LRN_SCAN_NEXT_CFG_ISDX_LIMIT_IDX_FILTER_ENA BIT(2)
+#define LRN_SCAN_NEXT_CFG_ISDX_LIMIT_IDX_FILTER_ENA_SET(x)\
+       FIELD_PREP(LRN_SCAN_NEXT_CFG_ISDX_LIMIT_IDX_FILTER_ENA, x)
+#define LRN_SCAN_NEXT_CFG_ISDX_LIMIT_IDX_FILTER_ENA_GET(x)\
+       FIELD_GET(LRN_SCAN_NEXT_CFG_ISDX_LIMIT_IDX_FILTER_ENA, x)
+
+#define LRN_SCAN_NEXT_CFG_FID_FILTER_ENA         BIT(1)
+#define LRN_SCAN_NEXT_CFG_FID_FILTER_ENA_SET(x)\
+       FIELD_PREP(LRN_SCAN_NEXT_CFG_FID_FILTER_ENA, x)
+#define LRN_SCAN_NEXT_CFG_FID_FILTER_ENA_GET(x)\
+       FIELD_GET(LRN_SCAN_NEXT_CFG_FID_FILTER_ENA, x)
+
+#define LRN_SCAN_NEXT_CFG_ADDR_FILTER_ENA        BIT(0)
+#define LRN_SCAN_NEXT_CFG_ADDR_FILTER_ENA_SET(x)\
+       FIELD_PREP(LRN_SCAN_NEXT_CFG_ADDR_FILTER_ENA, x)
+#define LRN_SCAN_NEXT_CFG_ADDR_FILTER_ENA_GET(x)\
+       FIELD_GET(LRN_SCAN_NEXT_CFG_ADDR_FILTER_ENA, x)
+
+/*      LRN:COMMON:SCAN_NEXT_CFG_1 */
+#define LRN_SCAN_NEXT_CFG_1       __REG(TARGET_LRN, 0, 1, 0, 0, 1, 72, 24, 0, 1, 4)
+
+#define LRN_SCAN_NEXT_CFG_1_PORT_MOVE_NEW_ADDR   GENMASK(30, 16)
+#define LRN_SCAN_NEXT_CFG_1_PORT_MOVE_NEW_ADDR_SET(x)\
+       FIELD_PREP(LRN_SCAN_NEXT_CFG_1_PORT_MOVE_NEW_ADDR, x)
+#define LRN_SCAN_NEXT_CFG_1_PORT_MOVE_NEW_ADDR_GET(x)\
+       FIELD_GET(LRN_SCAN_NEXT_CFG_1_PORT_MOVE_NEW_ADDR, x)
+
+#define LRN_SCAN_NEXT_CFG_1_SCAN_ENTRY_ADDR_MASK GENMASK(14, 0)
+#define LRN_SCAN_NEXT_CFG_1_SCAN_ENTRY_ADDR_MASK_SET(x)\
+       FIELD_PREP(LRN_SCAN_NEXT_CFG_1_SCAN_ENTRY_ADDR_MASK, x)
+#define LRN_SCAN_NEXT_CFG_1_SCAN_ENTRY_ADDR_MASK_GET(x)\
+       FIELD_GET(LRN_SCAN_NEXT_CFG_1_SCAN_ENTRY_ADDR_MASK, x)
+
+/*      LRN:COMMON:AUTOAGE_CFG */
+#define LRN_AUTOAGE_CFG(r)        __REG(TARGET_LRN, 0, 1, 0, 0, 1, 72, 36, r, 4, 4)
+
+#define LRN_AUTOAGE_CFG_UNIT_SIZE                GENMASK(29, 28)
+#define LRN_AUTOAGE_CFG_UNIT_SIZE_SET(x)\
+       FIELD_PREP(LRN_AUTOAGE_CFG_UNIT_SIZE, x)
+#define LRN_AUTOAGE_CFG_UNIT_SIZE_GET(x)\
+       FIELD_GET(LRN_AUTOAGE_CFG_UNIT_SIZE, x)
+
+#define LRN_AUTOAGE_CFG_PERIOD_VAL               GENMASK(27, 0)
+#define LRN_AUTOAGE_CFG_PERIOD_VAL_SET(x)\
+       FIELD_PREP(LRN_AUTOAGE_CFG_PERIOD_VAL, x)
+#define LRN_AUTOAGE_CFG_PERIOD_VAL_GET(x)\
+       FIELD_GET(LRN_AUTOAGE_CFG_PERIOD_VAL, x)
+
+/*      LRN:COMMON:AUTOAGE_CFG_1 */
+#define LRN_AUTOAGE_CFG_1         __REG(TARGET_LRN, 0, 1, 0, 0, 1, 72, 52, 0, 1, 4)
+
+#define LRN_AUTOAGE_CFG_1_PAUSE_AUTO_AGE_ENA     BIT(25)
+#define LRN_AUTOAGE_CFG_1_PAUSE_AUTO_AGE_ENA_SET(x)\
+       FIELD_PREP(LRN_AUTOAGE_CFG_1_PAUSE_AUTO_AGE_ENA, x)
+#define LRN_AUTOAGE_CFG_1_PAUSE_AUTO_AGE_ENA_GET(x)\
+       FIELD_GET(LRN_AUTOAGE_CFG_1_PAUSE_AUTO_AGE_ENA, x)
+
+#define LRN_AUTOAGE_CFG_1_CELLS_BETWEEN_ENTRY_SCAN GENMASK(24, 15)
+#define LRN_AUTOAGE_CFG_1_CELLS_BETWEEN_ENTRY_SCAN_SET(x)\
+       FIELD_PREP(LRN_AUTOAGE_CFG_1_CELLS_BETWEEN_ENTRY_SCAN, x)
+#define LRN_AUTOAGE_CFG_1_CELLS_BETWEEN_ENTRY_SCAN_GET(x)\
+       FIELD_GET(LRN_AUTOAGE_CFG_1_CELLS_BETWEEN_ENTRY_SCAN, x)
+
+#define LRN_AUTOAGE_CFG_1_CLK_PERIOD_01NS        GENMASK(14, 7)
+#define LRN_AUTOAGE_CFG_1_CLK_PERIOD_01NS_SET(x)\
+       FIELD_PREP(LRN_AUTOAGE_CFG_1_CLK_PERIOD_01NS, x)
+#define LRN_AUTOAGE_CFG_1_CLK_PERIOD_01NS_GET(x)\
+       FIELD_GET(LRN_AUTOAGE_CFG_1_CLK_PERIOD_01NS, x)
+
+#define LRN_AUTOAGE_CFG_1_USE_PORT_FILTER_ENA    BIT(6)
+#define LRN_AUTOAGE_CFG_1_USE_PORT_FILTER_ENA_SET(x)\
+       FIELD_PREP(LRN_AUTOAGE_CFG_1_USE_PORT_FILTER_ENA, x)
+#define LRN_AUTOAGE_CFG_1_USE_PORT_FILTER_ENA_GET(x)\
+       FIELD_GET(LRN_AUTOAGE_CFG_1_USE_PORT_FILTER_ENA, x)
+
+#define LRN_AUTOAGE_CFG_1_FORCE_HW_SCAN_SHOT     GENMASK(5, 2)
+#define LRN_AUTOAGE_CFG_1_FORCE_HW_SCAN_SHOT_SET(x)\
+       FIELD_PREP(LRN_AUTOAGE_CFG_1_FORCE_HW_SCAN_SHOT, x)
+#define LRN_AUTOAGE_CFG_1_FORCE_HW_SCAN_SHOT_GET(x)\
+       FIELD_GET(LRN_AUTOAGE_CFG_1_FORCE_HW_SCAN_SHOT, x)
+
+#define LRN_AUTOAGE_CFG_1_FORCE_HW_SCAN_STOP_SHOT BIT(1)
+#define LRN_AUTOAGE_CFG_1_FORCE_HW_SCAN_STOP_SHOT_SET(x)\
+       FIELD_PREP(LRN_AUTOAGE_CFG_1_FORCE_HW_SCAN_STOP_SHOT, x)
+#define LRN_AUTOAGE_CFG_1_FORCE_HW_SCAN_STOP_SHOT_GET(x)\
+       FIELD_GET(LRN_AUTOAGE_CFG_1_FORCE_HW_SCAN_STOP_SHOT, x)
+
+#define LRN_AUTOAGE_CFG_1_FORCE_IDLE_ENA         BIT(0)
+#define LRN_AUTOAGE_CFG_1_FORCE_IDLE_ENA_SET(x)\
+       FIELD_PREP(LRN_AUTOAGE_CFG_1_FORCE_IDLE_ENA, x)
+#define LRN_AUTOAGE_CFG_1_FORCE_IDLE_ENA_GET(x)\
+       FIELD_GET(LRN_AUTOAGE_CFG_1_FORCE_IDLE_ENA, x)
+
+/*      LRN:COMMON:AUTOAGE_CFG_2 */
+#define LRN_AUTOAGE_CFG_2         __REG(TARGET_LRN, 0, 1, 0, 0, 1, 72, 56, 0, 1, 4)
+
+#define LRN_AUTOAGE_CFG_2_NEXT_ROW               GENMASK(17, 4)
+#define LRN_AUTOAGE_CFG_2_NEXT_ROW_SET(x)\
+       FIELD_PREP(LRN_AUTOAGE_CFG_2_NEXT_ROW, x)
+#define LRN_AUTOAGE_CFG_2_NEXT_ROW_GET(x)\
+       FIELD_GET(LRN_AUTOAGE_CFG_2_NEXT_ROW, x)
+
+#define LRN_AUTOAGE_CFG_2_SCAN_ONGOING_STATUS    GENMASK(3, 0)
+#define LRN_AUTOAGE_CFG_2_SCAN_ONGOING_STATUS_SET(x)\
+       FIELD_PREP(LRN_AUTOAGE_CFG_2_SCAN_ONGOING_STATUS, x)
+#define LRN_AUTOAGE_CFG_2_SCAN_ONGOING_STATUS_GET(x)\
+       FIELD_GET(LRN_AUTOAGE_CFG_2_SCAN_ONGOING_STATUS, x)
+
+/*      PCIE_DM_EP:PF0_ATU_CAP:IATU_REGION_CTRL_2_OFF_OUTBOUND_0 */
+#define PCEP_RCTRL_2_OUT_0        __REG(TARGET_PCEP, 0, 1, 3145728, 0, 1, 130852, 4, 0, 1, 4)
+
+#define PCEP_RCTRL_2_OUT_0_MSG_CODE              GENMASK(7, 0)
+#define PCEP_RCTRL_2_OUT_0_MSG_CODE_SET(x)\
+       FIELD_PREP(PCEP_RCTRL_2_OUT_0_MSG_CODE, x)
+#define PCEP_RCTRL_2_OUT_0_MSG_CODE_GET(x)\
+       FIELD_GET(PCEP_RCTRL_2_OUT_0_MSG_CODE, x)
+
+#define PCEP_RCTRL_2_OUT_0_TAG                   GENMASK(15, 8)
+#define PCEP_RCTRL_2_OUT_0_TAG_SET(x)\
+       FIELD_PREP(PCEP_RCTRL_2_OUT_0_TAG, x)
+#define PCEP_RCTRL_2_OUT_0_TAG_GET(x)\
+       FIELD_GET(PCEP_RCTRL_2_OUT_0_TAG, x)
+
+#define PCEP_RCTRL_2_OUT_0_TAG_SUBSTITUTE_EN     BIT(16)
+#define PCEP_RCTRL_2_OUT_0_TAG_SUBSTITUTE_EN_SET(x)\
+       FIELD_PREP(PCEP_RCTRL_2_OUT_0_TAG_SUBSTITUTE_EN, x)
+#define PCEP_RCTRL_2_OUT_0_TAG_SUBSTITUTE_EN_GET(x)\
+       FIELD_GET(PCEP_RCTRL_2_OUT_0_TAG_SUBSTITUTE_EN, x)
+
+#define PCEP_RCTRL_2_OUT_0_FUNC_BYPASS           BIT(19)
+#define PCEP_RCTRL_2_OUT_0_FUNC_BYPASS_SET(x)\
+       FIELD_PREP(PCEP_RCTRL_2_OUT_0_FUNC_BYPASS, x)
+#define PCEP_RCTRL_2_OUT_0_FUNC_BYPASS_GET(x)\
+       FIELD_GET(PCEP_RCTRL_2_OUT_0_FUNC_BYPASS, x)
+
+#define PCEP_RCTRL_2_OUT_0_SNP                   BIT(20)
+#define PCEP_RCTRL_2_OUT_0_SNP_SET(x)\
+       FIELD_PREP(PCEP_RCTRL_2_OUT_0_SNP, x)
+#define PCEP_RCTRL_2_OUT_0_SNP_GET(x)\
+       FIELD_GET(PCEP_RCTRL_2_OUT_0_SNP, x)
+
+#define PCEP_RCTRL_2_OUT_0_INHIBIT_PAYLOAD       BIT(22)
+#define PCEP_RCTRL_2_OUT_0_INHIBIT_PAYLOAD_SET(x)\
+       FIELD_PREP(PCEP_RCTRL_2_OUT_0_INHIBIT_PAYLOAD, x)
+#define PCEP_RCTRL_2_OUT_0_INHIBIT_PAYLOAD_GET(x)\
+       FIELD_GET(PCEP_RCTRL_2_OUT_0_INHIBIT_PAYLOAD, x)
+
+#define PCEP_RCTRL_2_OUT_0_HEADER_SUBSTITUTE_EN  BIT(23)
+#define PCEP_RCTRL_2_OUT_0_HEADER_SUBSTITUTE_EN_SET(x)\
+       FIELD_PREP(PCEP_RCTRL_2_OUT_0_HEADER_SUBSTITUTE_EN, x)
+#define PCEP_RCTRL_2_OUT_0_HEADER_SUBSTITUTE_EN_GET(x)\
+       FIELD_GET(PCEP_RCTRL_2_OUT_0_HEADER_SUBSTITUTE_EN, x)
+
+#define PCEP_RCTRL_2_OUT_0_CFG_SHIFT_MODE        BIT(28)
+#define PCEP_RCTRL_2_OUT_0_CFG_SHIFT_MODE_SET(x)\
+       FIELD_PREP(PCEP_RCTRL_2_OUT_0_CFG_SHIFT_MODE, x)
+#define PCEP_RCTRL_2_OUT_0_CFG_SHIFT_MODE_GET(x)\
+       FIELD_GET(PCEP_RCTRL_2_OUT_0_CFG_SHIFT_MODE, x)
+
+#define PCEP_RCTRL_2_OUT_0_INVERT_MODE           BIT(29)
+#define PCEP_RCTRL_2_OUT_0_INVERT_MODE_SET(x)\
+       FIELD_PREP(PCEP_RCTRL_2_OUT_0_INVERT_MODE, x)
+#define PCEP_RCTRL_2_OUT_0_INVERT_MODE_GET(x)\
+       FIELD_GET(PCEP_RCTRL_2_OUT_0_INVERT_MODE, x)
+
+#define PCEP_RCTRL_2_OUT_0_REGION_EN             BIT(31)
+#define PCEP_RCTRL_2_OUT_0_REGION_EN_SET(x)\
+       FIELD_PREP(PCEP_RCTRL_2_OUT_0_REGION_EN, x)
+#define PCEP_RCTRL_2_OUT_0_REGION_EN_GET(x)\
+       FIELD_GET(PCEP_RCTRL_2_OUT_0_REGION_EN, x)
+
+/*      PCIE_DM_EP:PF0_ATU_CAP:IATU_LWR_BASE_ADDR_OFF_OUTBOUND_0 */
+#define PCEP_ADDR_LWR_OUT_0       __REG(TARGET_PCEP, 0, 1, 3145728, 0, 1, 130852, 8, 0, 1, 4)
+
+#define PCEP_ADDR_LWR_OUT_0_LWR_BASE_HW          GENMASK(15, 0)
+#define PCEP_ADDR_LWR_OUT_0_LWR_BASE_HW_SET(x)\
+       FIELD_PREP(PCEP_ADDR_LWR_OUT_0_LWR_BASE_HW, x)
+#define PCEP_ADDR_LWR_OUT_0_LWR_BASE_HW_GET(x)\
+       FIELD_GET(PCEP_ADDR_LWR_OUT_0_LWR_BASE_HW, x)
+
+#define PCEP_ADDR_LWR_OUT_0_LWR_BASE_RW          GENMASK(31, 16)
+#define PCEP_ADDR_LWR_OUT_0_LWR_BASE_RW_SET(x)\
+       FIELD_PREP(PCEP_ADDR_LWR_OUT_0_LWR_BASE_RW, x)
+#define PCEP_ADDR_LWR_OUT_0_LWR_BASE_RW_GET(x)\
+       FIELD_GET(PCEP_ADDR_LWR_OUT_0_LWR_BASE_RW, x)
+
+/*      PCIE_DM_EP:PF0_ATU_CAP:IATU_UPPER_BASE_ADDR_OFF_OUTBOUND_0 */
+#define PCEP_ADDR_UPR_OUT_0       __REG(TARGET_PCEP, 0, 1, 3145728, 0, 1, 130852, 12, 0, 1, 4)
+
+/*      PCIE_DM_EP:PF0_ATU_CAP:IATU_LIMIT_ADDR_OFF_OUTBOUND_0 */
+#define PCEP_ADDR_LIM_OUT_0       __REG(TARGET_PCEP, 0, 1, 3145728, 0, 1, 130852, 16, 0, 1, 4)
+
+#define PCEP_ADDR_LIM_OUT_0_LIMIT_ADDR_HW        GENMASK(15, 0)
+#define PCEP_ADDR_LIM_OUT_0_LIMIT_ADDR_HW_SET(x)\
+       FIELD_PREP(PCEP_ADDR_LIM_OUT_0_LIMIT_ADDR_HW, x)
+#define PCEP_ADDR_LIM_OUT_0_LIMIT_ADDR_HW_GET(x)\
+       FIELD_GET(PCEP_ADDR_LIM_OUT_0_LIMIT_ADDR_HW, x)
+
+#define PCEP_ADDR_LIM_OUT_0_LIMIT_ADDR_RW        GENMASK(31, 16)
+#define PCEP_ADDR_LIM_OUT_0_LIMIT_ADDR_RW_SET(x)\
+       FIELD_PREP(PCEP_ADDR_LIM_OUT_0_LIMIT_ADDR_RW, x)
+#define PCEP_ADDR_LIM_OUT_0_LIMIT_ADDR_RW_GET(x)\
+       FIELD_GET(PCEP_ADDR_LIM_OUT_0_LIMIT_ADDR_RW, x)
+
+/*      PCIE_DM_EP:PF0_ATU_CAP:IATU_LWR_TARGET_ADDR_OFF_OUTBOUND_0 */
+#define PCEP_ADDR_LWR_TGT_OUT_0   __REG(TARGET_PCEP, 0, 1, 3145728, 0, 1, 130852, 20, 0, 1, 4)
+
+/*      PCIE_DM_EP:PF0_ATU_CAP:IATU_UPPER_TARGET_ADDR_OFF_OUTBOUND_0 */
+#define PCEP_ADDR_UPR_TGT_OUT_0   __REG(TARGET_PCEP, 0, 1, 3145728, 0, 1, 130852, 24, 0, 1, 4)
+
+/*      PCIE_DM_EP:PF0_ATU_CAP:IATU_UPPR_LIMIT_ADDR_OFF_OUTBOUND_0 */
+#define PCEP_ADDR_UPR_LIM_OUT_0   __REG(TARGET_PCEP, 0, 1, 3145728, 0, 1, 130852, 32, 0, 1, 4)
+
+#define PCEP_ADDR_UPR_LIM_OUT_0_UPPR_LIMIT_ADDR_RW GENMASK(1, 0)
+#define PCEP_ADDR_UPR_LIM_OUT_0_UPPR_LIMIT_ADDR_RW_SET(x)\
+       FIELD_PREP(PCEP_ADDR_UPR_LIM_OUT_0_UPPR_LIMIT_ADDR_RW, x)
+#define PCEP_ADDR_UPR_LIM_OUT_0_UPPR_LIMIT_ADDR_RW_GET(x)\
+       FIELD_GET(PCEP_ADDR_UPR_LIM_OUT_0_UPPR_LIMIT_ADDR_RW, x)
+
+#define PCEP_ADDR_UPR_LIM_OUT_0_UPPR_LIMIT_ADDR_HW GENMASK(31, 2)
+#define PCEP_ADDR_UPR_LIM_OUT_0_UPPR_LIMIT_ADDR_HW_SET(x)\
+       FIELD_PREP(PCEP_ADDR_UPR_LIM_OUT_0_UPPR_LIMIT_ADDR_HW, x)
+#define PCEP_ADDR_UPR_LIM_OUT_0_UPPR_LIMIT_ADDR_HW_GET(x)\
+       FIELD_GET(PCEP_ADDR_UPR_LIM_OUT_0_UPPR_LIMIT_ADDR_HW, x)
+
+/*      PCS_10GBASE_R:PCS_10GBR_CFG:PCS_CFG */
+#define PCS10G_BR_PCS_CFG(t)      __REG(TARGET_PCS10G_BR, t, 12, 0, 0, 1, 56, 0, 0, 1, 4)
+
+#define PCS10G_BR_PCS_CFG_PCS_ENA                BIT(31)
+#define PCS10G_BR_PCS_CFG_PCS_ENA_SET(x)\
+       FIELD_PREP(PCS10G_BR_PCS_CFG_PCS_ENA, x)
+#define PCS10G_BR_PCS_CFG_PCS_ENA_GET(x)\
+       FIELD_GET(PCS10G_BR_PCS_CFG_PCS_ENA, x)
+
+#define PCS10G_BR_PCS_CFG_PMA_LOOPBACK_ENA       BIT(30)
+#define PCS10G_BR_PCS_CFG_PMA_LOOPBACK_ENA_SET(x)\
+       FIELD_PREP(PCS10G_BR_PCS_CFG_PMA_LOOPBACK_ENA, x)
+#define PCS10G_BR_PCS_CFG_PMA_LOOPBACK_ENA_GET(x)\
+       FIELD_GET(PCS10G_BR_PCS_CFG_PMA_LOOPBACK_ENA, x)
+
+#define PCS10G_BR_PCS_CFG_SH_CNT_MAX             GENMASK(29, 24)
+#define PCS10G_BR_PCS_CFG_SH_CNT_MAX_SET(x)\
+       FIELD_PREP(PCS10G_BR_PCS_CFG_SH_CNT_MAX, x)
+#define PCS10G_BR_PCS_CFG_SH_CNT_MAX_GET(x)\
+       FIELD_GET(PCS10G_BR_PCS_CFG_SH_CNT_MAX, x)
+
+#define PCS10G_BR_PCS_CFG_RX_DATA_FLIP           BIT(18)
+#define PCS10G_BR_PCS_CFG_RX_DATA_FLIP_SET(x)\
+       FIELD_PREP(PCS10G_BR_PCS_CFG_RX_DATA_FLIP, x)
+#define PCS10G_BR_PCS_CFG_RX_DATA_FLIP_GET(x)\
+       FIELD_GET(PCS10G_BR_PCS_CFG_RX_DATA_FLIP, x)
+
+#define PCS10G_BR_PCS_CFG_RESYNC_ENA             BIT(15)
+#define PCS10G_BR_PCS_CFG_RESYNC_ENA_SET(x)\
+       FIELD_PREP(PCS10G_BR_PCS_CFG_RESYNC_ENA, x)
+#define PCS10G_BR_PCS_CFG_RESYNC_ENA_GET(x)\
+       FIELD_GET(PCS10G_BR_PCS_CFG_RESYNC_ENA, x)
+
+#define PCS10G_BR_PCS_CFG_LF_GEN_DIS             BIT(14)
+#define PCS10G_BR_PCS_CFG_LF_GEN_DIS_SET(x)\
+       FIELD_PREP(PCS10G_BR_PCS_CFG_LF_GEN_DIS, x)
+#define PCS10G_BR_PCS_CFG_LF_GEN_DIS_GET(x)\
+       FIELD_GET(PCS10G_BR_PCS_CFG_LF_GEN_DIS, x)
+
+#define PCS10G_BR_PCS_CFG_RX_TEST_MODE           BIT(13)
+#define PCS10G_BR_PCS_CFG_RX_TEST_MODE_SET(x)\
+       FIELD_PREP(PCS10G_BR_PCS_CFG_RX_TEST_MODE, x)
+#define PCS10G_BR_PCS_CFG_RX_TEST_MODE_GET(x)\
+       FIELD_GET(PCS10G_BR_PCS_CFG_RX_TEST_MODE, x)
+
+#define PCS10G_BR_PCS_CFG_RX_SCR_DISABLE         BIT(12)
+#define PCS10G_BR_PCS_CFG_RX_SCR_DISABLE_SET(x)\
+       FIELD_PREP(PCS10G_BR_PCS_CFG_RX_SCR_DISABLE, x)
+#define PCS10G_BR_PCS_CFG_RX_SCR_DISABLE_GET(x)\
+       FIELD_GET(PCS10G_BR_PCS_CFG_RX_SCR_DISABLE, x)
+
+#define PCS10G_BR_PCS_CFG_TX_DATA_FLIP           BIT(7)
+#define PCS10G_BR_PCS_CFG_TX_DATA_FLIP_SET(x)\
+       FIELD_PREP(PCS10G_BR_PCS_CFG_TX_DATA_FLIP, x)
+#define PCS10G_BR_PCS_CFG_TX_DATA_FLIP_GET(x)\
+       FIELD_GET(PCS10G_BR_PCS_CFG_TX_DATA_FLIP, x)
+
+#define PCS10G_BR_PCS_CFG_AN_LINK_CTRL_ENA       BIT(6)
+#define PCS10G_BR_PCS_CFG_AN_LINK_CTRL_ENA_SET(x)\
+       FIELD_PREP(PCS10G_BR_PCS_CFG_AN_LINK_CTRL_ENA, x)
+#define PCS10G_BR_PCS_CFG_AN_LINK_CTRL_ENA_GET(x)\
+       FIELD_GET(PCS10G_BR_PCS_CFG_AN_LINK_CTRL_ENA, x)
+
+#define PCS10G_BR_PCS_CFG_TX_TEST_MODE           BIT(4)
+#define PCS10G_BR_PCS_CFG_TX_TEST_MODE_SET(x)\
+       FIELD_PREP(PCS10G_BR_PCS_CFG_TX_TEST_MODE, x)
+#define PCS10G_BR_PCS_CFG_TX_TEST_MODE_GET(x)\
+       FIELD_GET(PCS10G_BR_PCS_CFG_TX_TEST_MODE, x)
+
+#define PCS10G_BR_PCS_CFG_TX_SCR_DISABLE         BIT(3)
+#define PCS10G_BR_PCS_CFG_TX_SCR_DISABLE_SET(x)\
+       FIELD_PREP(PCS10G_BR_PCS_CFG_TX_SCR_DISABLE, x)
+#define PCS10G_BR_PCS_CFG_TX_SCR_DISABLE_GET(x)\
+       FIELD_GET(PCS10G_BR_PCS_CFG_TX_SCR_DISABLE, x)
+
+/*      PCS_10GBASE_R:PCS_10GBR_CFG:PCS_SD_CFG */
+#define PCS10G_BR_PCS_SD_CFG(t)   __REG(TARGET_PCS10G_BR, t, 12, 0, 0, 1, 56, 4, 0, 1, 4)
+
+#define PCS10G_BR_PCS_SD_CFG_SD_SEL              BIT(8)
+#define PCS10G_BR_PCS_SD_CFG_SD_SEL_SET(x)\
+       FIELD_PREP(PCS10G_BR_PCS_SD_CFG_SD_SEL, x)
+#define PCS10G_BR_PCS_SD_CFG_SD_SEL_GET(x)\
+       FIELD_GET(PCS10G_BR_PCS_SD_CFG_SD_SEL, x)
+
+#define PCS10G_BR_PCS_SD_CFG_SD_POL              BIT(4)
+#define PCS10G_BR_PCS_SD_CFG_SD_POL_SET(x)\
+       FIELD_PREP(PCS10G_BR_PCS_SD_CFG_SD_POL, x)
+#define PCS10G_BR_PCS_SD_CFG_SD_POL_GET(x)\
+       FIELD_GET(PCS10G_BR_PCS_SD_CFG_SD_POL, x)
+
+#define PCS10G_BR_PCS_SD_CFG_SD_ENA              BIT(0)
+#define PCS10G_BR_PCS_SD_CFG_SD_ENA_SET(x)\
+       FIELD_PREP(PCS10G_BR_PCS_SD_CFG_SD_ENA, x)
+#define PCS10G_BR_PCS_SD_CFG_SD_ENA_GET(x)\
+       FIELD_GET(PCS10G_BR_PCS_SD_CFG_SD_ENA, x)
+
+/*      PCS_10GBASE_R:PCS_10GBR_CFG:PCS_CFG */
+#define PCS25G_BR_PCS_CFG(t)      __REG(TARGET_PCS25G_BR, t, 8, 0, 0, 1, 56, 0, 0, 1, 4)
+
+#define PCS25G_BR_PCS_CFG_PCS_ENA                BIT(31)
+#define PCS25G_BR_PCS_CFG_PCS_ENA_SET(x)\
+       FIELD_PREP(PCS25G_BR_PCS_CFG_PCS_ENA, x)
+#define PCS25G_BR_PCS_CFG_PCS_ENA_GET(x)\
+       FIELD_GET(PCS25G_BR_PCS_CFG_PCS_ENA, x)
+
+#define PCS25G_BR_PCS_CFG_PMA_LOOPBACK_ENA       BIT(30)
+#define PCS25G_BR_PCS_CFG_PMA_LOOPBACK_ENA_SET(x)\
+       FIELD_PREP(PCS25G_BR_PCS_CFG_PMA_LOOPBACK_ENA, x)
+#define PCS25G_BR_PCS_CFG_PMA_LOOPBACK_ENA_GET(x)\
+       FIELD_GET(PCS25G_BR_PCS_CFG_PMA_LOOPBACK_ENA, x)
+
+#define PCS25G_BR_PCS_CFG_SH_CNT_MAX             GENMASK(29, 24)
+#define PCS25G_BR_PCS_CFG_SH_CNT_MAX_SET(x)\
+       FIELD_PREP(PCS25G_BR_PCS_CFG_SH_CNT_MAX, x)
+#define PCS25G_BR_PCS_CFG_SH_CNT_MAX_GET(x)\
+       FIELD_GET(PCS25G_BR_PCS_CFG_SH_CNT_MAX, x)
+
+#define PCS25G_BR_PCS_CFG_RX_DATA_FLIP           BIT(18)
+#define PCS25G_BR_PCS_CFG_RX_DATA_FLIP_SET(x)\
+       FIELD_PREP(PCS25G_BR_PCS_CFG_RX_DATA_FLIP, x)
+#define PCS25G_BR_PCS_CFG_RX_DATA_FLIP_GET(x)\
+       FIELD_GET(PCS25G_BR_PCS_CFG_RX_DATA_FLIP, x)
+
+#define PCS25G_BR_PCS_CFG_RESYNC_ENA             BIT(15)
+#define PCS25G_BR_PCS_CFG_RESYNC_ENA_SET(x)\
+       FIELD_PREP(PCS25G_BR_PCS_CFG_RESYNC_ENA, x)
+#define PCS25G_BR_PCS_CFG_RESYNC_ENA_GET(x)\
+       FIELD_GET(PCS25G_BR_PCS_CFG_RESYNC_ENA, x)
+
+#define PCS25G_BR_PCS_CFG_LF_GEN_DIS             BIT(14)
+#define PCS25G_BR_PCS_CFG_LF_GEN_DIS_SET(x)\
+       FIELD_PREP(PCS25G_BR_PCS_CFG_LF_GEN_DIS, x)
+#define PCS25G_BR_PCS_CFG_LF_GEN_DIS_GET(x)\
+       FIELD_GET(PCS25G_BR_PCS_CFG_LF_GEN_DIS, x)
+
+#define PCS25G_BR_PCS_CFG_RX_TEST_MODE           BIT(13)
+#define PCS25G_BR_PCS_CFG_RX_TEST_MODE_SET(x)\
+       FIELD_PREP(PCS25G_BR_PCS_CFG_RX_TEST_MODE, x)
+#define PCS25G_BR_PCS_CFG_RX_TEST_MODE_GET(x)\
+       FIELD_GET(PCS25G_BR_PCS_CFG_RX_TEST_MODE, x)
+
+#define PCS25G_BR_PCS_CFG_RX_SCR_DISABLE         BIT(12)
+#define PCS25G_BR_PCS_CFG_RX_SCR_DISABLE_SET(x)\
+       FIELD_PREP(PCS25G_BR_PCS_CFG_RX_SCR_DISABLE, x)
+#define PCS25G_BR_PCS_CFG_RX_SCR_DISABLE_GET(x)\
+       FIELD_GET(PCS25G_BR_PCS_CFG_RX_SCR_DISABLE, x)
+
+#define PCS25G_BR_PCS_CFG_TX_DATA_FLIP           BIT(7)
+#define PCS25G_BR_PCS_CFG_TX_DATA_FLIP_SET(x)\
+       FIELD_PREP(PCS25G_BR_PCS_CFG_TX_DATA_FLIP, x)
+#define PCS25G_BR_PCS_CFG_TX_DATA_FLIP_GET(x)\
+       FIELD_GET(PCS25G_BR_PCS_CFG_TX_DATA_FLIP, x)
+
+#define PCS25G_BR_PCS_CFG_AN_LINK_CTRL_ENA       BIT(6)
+#define PCS25G_BR_PCS_CFG_AN_LINK_CTRL_ENA_SET(x)\
+       FIELD_PREP(PCS25G_BR_PCS_CFG_AN_LINK_CTRL_ENA, x)
+#define PCS25G_BR_PCS_CFG_AN_LINK_CTRL_ENA_GET(x)\
+       FIELD_GET(PCS25G_BR_PCS_CFG_AN_LINK_CTRL_ENA, x)
+
+#define PCS25G_BR_PCS_CFG_TX_TEST_MODE           BIT(4)
+#define PCS25G_BR_PCS_CFG_TX_TEST_MODE_SET(x)\
+       FIELD_PREP(PCS25G_BR_PCS_CFG_TX_TEST_MODE, x)
+#define PCS25G_BR_PCS_CFG_TX_TEST_MODE_GET(x)\
+       FIELD_GET(PCS25G_BR_PCS_CFG_TX_TEST_MODE, x)
+
+#define PCS25G_BR_PCS_CFG_TX_SCR_DISABLE         BIT(3)
+#define PCS25G_BR_PCS_CFG_TX_SCR_DISABLE_SET(x)\
+       FIELD_PREP(PCS25G_BR_PCS_CFG_TX_SCR_DISABLE, x)
+#define PCS25G_BR_PCS_CFG_TX_SCR_DISABLE_GET(x)\
+       FIELD_GET(PCS25G_BR_PCS_CFG_TX_SCR_DISABLE, x)
+
+/*      PCS_10GBASE_R:PCS_10GBR_CFG:PCS_SD_CFG */
+#define PCS25G_BR_PCS_SD_CFG(t)   __REG(TARGET_PCS25G_BR, t, 8, 0, 0, 1, 56, 4, 0, 1, 4)
+
+#define PCS25G_BR_PCS_SD_CFG_SD_SEL              BIT(8)
+#define PCS25G_BR_PCS_SD_CFG_SD_SEL_SET(x)\
+       FIELD_PREP(PCS25G_BR_PCS_SD_CFG_SD_SEL, x)
+#define PCS25G_BR_PCS_SD_CFG_SD_SEL_GET(x)\
+       FIELD_GET(PCS25G_BR_PCS_SD_CFG_SD_SEL, x)
+
+#define PCS25G_BR_PCS_SD_CFG_SD_POL              BIT(4)
+#define PCS25G_BR_PCS_SD_CFG_SD_POL_SET(x)\
+       FIELD_PREP(PCS25G_BR_PCS_SD_CFG_SD_POL, x)
+#define PCS25G_BR_PCS_SD_CFG_SD_POL_GET(x)\
+       FIELD_GET(PCS25G_BR_PCS_SD_CFG_SD_POL, x)
+
+#define PCS25G_BR_PCS_SD_CFG_SD_ENA              BIT(0)
+#define PCS25G_BR_PCS_SD_CFG_SD_ENA_SET(x)\
+       FIELD_PREP(PCS25G_BR_PCS_SD_CFG_SD_ENA, x)
+#define PCS25G_BR_PCS_SD_CFG_SD_ENA_GET(x)\
+       FIELD_GET(PCS25G_BR_PCS_SD_CFG_SD_ENA, x)
+
+/*      PCS_10GBASE_R:PCS_10GBR_CFG:PCS_CFG */
+#define PCS5G_BR_PCS_CFG(t)       __REG(TARGET_PCS5G_BR, t, 13, 0, 0, 1, 56, 0, 0, 1, 4)
+
+#define PCS5G_BR_PCS_CFG_PCS_ENA                 BIT(31)
+#define PCS5G_BR_PCS_CFG_PCS_ENA_SET(x)\
+       FIELD_PREP(PCS5G_BR_PCS_CFG_PCS_ENA, x)
+#define PCS5G_BR_PCS_CFG_PCS_ENA_GET(x)\
+       FIELD_GET(PCS5G_BR_PCS_CFG_PCS_ENA, x)
+
+#define PCS5G_BR_PCS_CFG_PMA_LOOPBACK_ENA        BIT(30)
+#define PCS5G_BR_PCS_CFG_PMA_LOOPBACK_ENA_SET(x)\
+       FIELD_PREP(PCS5G_BR_PCS_CFG_PMA_LOOPBACK_ENA, x)
+#define PCS5G_BR_PCS_CFG_PMA_LOOPBACK_ENA_GET(x)\
+       FIELD_GET(PCS5G_BR_PCS_CFG_PMA_LOOPBACK_ENA, x)
+
+#define PCS5G_BR_PCS_CFG_SH_CNT_MAX              GENMASK(29, 24)
+#define PCS5G_BR_PCS_CFG_SH_CNT_MAX_SET(x)\
+       FIELD_PREP(PCS5G_BR_PCS_CFG_SH_CNT_MAX, x)
+#define PCS5G_BR_PCS_CFG_SH_CNT_MAX_GET(x)\
+       FIELD_GET(PCS5G_BR_PCS_CFG_SH_CNT_MAX, x)
+
+#define PCS5G_BR_PCS_CFG_RX_DATA_FLIP            BIT(18)
+#define PCS5G_BR_PCS_CFG_RX_DATA_FLIP_SET(x)\
+       FIELD_PREP(PCS5G_BR_PCS_CFG_RX_DATA_FLIP, x)
+#define PCS5G_BR_PCS_CFG_RX_DATA_FLIP_GET(x)\
+       FIELD_GET(PCS5G_BR_PCS_CFG_RX_DATA_FLIP, x)
+
+#define PCS5G_BR_PCS_CFG_RESYNC_ENA              BIT(15)
+#define PCS5G_BR_PCS_CFG_RESYNC_ENA_SET(x)\
+       FIELD_PREP(PCS5G_BR_PCS_CFG_RESYNC_ENA, x)
+#define PCS5G_BR_PCS_CFG_RESYNC_ENA_GET(x)\
+       FIELD_GET(PCS5G_BR_PCS_CFG_RESYNC_ENA, x)
+
+#define PCS5G_BR_PCS_CFG_LF_GEN_DIS              BIT(14)
+#define PCS5G_BR_PCS_CFG_LF_GEN_DIS_SET(x)\
+       FIELD_PREP(PCS5G_BR_PCS_CFG_LF_GEN_DIS, x)
+#define PCS5G_BR_PCS_CFG_LF_GEN_DIS_GET(x)\
+       FIELD_GET(PCS5G_BR_PCS_CFG_LF_GEN_DIS, x)
+
+#define PCS5G_BR_PCS_CFG_RX_TEST_MODE            BIT(13)
+#define PCS5G_BR_PCS_CFG_RX_TEST_MODE_SET(x)\
+       FIELD_PREP(PCS5G_BR_PCS_CFG_RX_TEST_MODE, x)
+#define PCS5G_BR_PCS_CFG_RX_TEST_MODE_GET(x)\
+       FIELD_GET(PCS5G_BR_PCS_CFG_RX_TEST_MODE, x)
+
+#define PCS5G_BR_PCS_CFG_RX_SCR_DISABLE          BIT(12)
+#define PCS5G_BR_PCS_CFG_RX_SCR_DISABLE_SET(x)\
+       FIELD_PREP(PCS5G_BR_PCS_CFG_RX_SCR_DISABLE, x)
+#define PCS5G_BR_PCS_CFG_RX_SCR_DISABLE_GET(x)\
+       FIELD_GET(PCS5G_BR_PCS_CFG_RX_SCR_DISABLE, x)
+
+#define PCS5G_BR_PCS_CFG_TX_DATA_FLIP            BIT(7)
+#define PCS5G_BR_PCS_CFG_TX_DATA_FLIP_SET(x)\
+       FIELD_PREP(PCS5G_BR_PCS_CFG_TX_DATA_FLIP, x)
+#define PCS5G_BR_PCS_CFG_TX_DATA_FLIP_GET(x)\
+       FIELD_GET(PCS5G_BR_PCS_CFG_TX_DATA_FLIP, x)
+
+#define PCS5G_BR_PCS_CFG_AN_LINK_CTRL_ENA        BIT(6)
+#define PCS5G_BR_PCS_CFG_AN_LINK_CTRL_ENA_SET(x)\
+       FIELD_PREP(PCS5G_BR_PCS_CFG_AN_LINK_CTRL_ENA, x)
+#define PCS5G_BR_PCS_CFG_AN_LINK_CTRL_ENA_GET(x)\
+       FIELD_GET(PCS5G_BR_PCS_CFG_AN_LINK_CTRL_ENA, x)
+
+#define PCS5G_BR_PCS_CFG_TX_TEST_MODE            BIT(4)
+#define PCS5G_BR_PCS_CFG_TX_TEST_MODE_SET(x)\
+       FIELD_PREP(PCS5G_BR_PCS_CFG_TX_TEST_MODE, x)
+#define PCS5G_BR_PCS_CFG_TX_TEST_MODE_GET(x)\
+       FIELD_GET(PCS5G_BR_PCS_CFG_TX_TEST_MODE, x)
+
+#define PCS5G_BR_PCS_CFG_TX_SCR_DISABLE          BIT(3)
+#define PCS5G_BR_PCS_CFG_TX_SCR_DISABLE_SET(x)\
+       FIELD_PREP(PCS5G_BR_PCS_CFG_TX_SCR_DISABLE, x)
+#define PCS5G_BR_PCS_CFG_TX_SCR_DISABLE_GET(x)\
+       FIELD_GET(PCS5G_BR_PCS_CFG_TX_SCR_DISABLE, x)
+
+/*      PCS_10GBASE_R:PCS_10GBR_CFG:PCS_SD_CFG */
+#define PCS5G_BR_PCS_SD_CFG(t)    __REG(TARGET_PCS5G_BR, t, 13, 0, 0, 1, 56, 4, 0, 1, 4)
+
+#define PCS5G_BR_PCS_SD_CFG_SD_SEL               BIT(8)
+#define PCS5G_BR_PCS_SD_CFG_SD_SEL_SET(x)\
+       FIELD_PREP(PCS5G_BR_PCS_SD_CFG_SD_SEL, x)
+#define PCS5G_BR_PCS_SD_CFG_SD_SEL_GET(x)\
+       FIELD_GET(PCS5G_BR_PCS_SD_CFG_SD_SEL, x)
+
+#define PCS5G_BR_PCS_SD_CFG_SD_POL               BIT(4)
+#define PCS5G_BR_PCS_SD_CFG_SD_POL_SET(x)\
+       FIELD_PREP(PCS5G_BR_PCS_SD_CFG_SD_POL, x)
+#define PCS5G_BR_PCS_SD_CFG_SD_POL_GET(x)\
+       FIELD_GET(PCS5G_BR_PCS_SD_CFG_SD_POL, x)
+
+#define PCS5G_BR_PCS_SD_CFG_SD_ENA               BIT(0)
+#define PCS5G_BR_PCS_SD_CFG_SD_ENA_SET(x)\
+       FIELD_PREP(PCS5G_BR_PCS_SD_CFG_SD_ENA, x)
+#define PCS5G_BR_PCS_SD_CFG_SD_ENA_GET(x)\
+       FIELD_GET(PCS5G_BR_PCS_SD_CFG_SD_ENA, x)
+
+/*      PORT_CONF:HW_CFG:DEV5G_MODES */
+#define PORT_CONF_DEV5G_MODES     __REG(TARGET_PORT_CONF, 0, 1, 0, 0, 1, 24, 0, 0, 1, 4)
+
+#define PORT_CONF_DEV5G_MODES_DEV5G_D0_MODE      BIT(0)
+#define PORT_CONF_DEV5G_MODES_DEV5G_D0_MODE_SET(x)\
+       FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D0_MODE, x)
+#define PORT_CONF_DEV5G_MODES_DEV5G_D0_MODE_GET(x)\
+       FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D0_MODE, x)
+
+#define PORT_CONF_DEV5G_MODES_DEV5G_D1_MODE      BIT(1)
+#define PORT_CONF_DEV5G_MODES_DEV5G_D1_MODE_SET(x)\
+       FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D1_MODE, x)
+#define PORT_CONF_DEV5G_MODES_DEV5G_D1_MODE_GET(x)\
+       FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D1_MODE, x)
+
+#define PORT_CONF_DEV5G_MODES_DEV5G_D2_MODE      BIT(2)
+#define PORT_CONF_DEV5G_MODES_DEV5G_D2_MODE_SET(x)\
+       FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D2_MODE, x)
+#define PORT_CONF_DEV5G_MODES_DEV5G_D2_MODE_GET(x)\
+       FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D2_MODE, x)
+
+#define PORT_CONF_DEV5G_MODES_DEV5G_D3_MODE      BIT(3)
+#define PORT_CONF_DEV5G_MODES_DEV5G_D3_MODE_SET(x)\
+       FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D3_MODE, x)
+#define PORT_CONF_DEV5G_MODES_DEV5G_D3_MODE_GET(x)\
+       FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D3_MODE, x)
+
+#define PORT_CONF_DEV5G_MODES_DEV5G_D4_MODE      BIT(4)
+#define PORT_CONF_DEV5G_MODES_DEV5G_D4_MODE_SET(x)\
+       FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D4_MODE, x)
+#define PORT_CONF_DEV5G_MODES_DEV5G_D4_MODE_GET(x)\
+       FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D4_MODE, x)
+
+#define PORT_CONF_DEV5G_MODES_DEV5G_D5_MODE      BIT(5)
+#define PORT_CONF_DEV5G_MODES_DEV5G_D5_MODE_SET(x)\
+       FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D5_MODE, x)
+#define PORT_CONF_DEV5G_MODES_DEV5G_D5_MODE_GET(x)\
+       FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D5_MODE, x)
+
+#define PORT_CONF_DEV5G_MODES_DEV5G_D6_MODE      BIT(6)
+#define PORT_CONF_DEV5G_MODES_DEV5G_D6_MODE_SET(x)\
+       FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D6_MODE, x)
+#define PORT_CONF_DEV5G_MODES_DEV5G_D6_MODE_GET(x)\
+       FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D6_MODE, x)
+
+#define PORT_CONF_DEV5G_MODES_DEV5G_D7_MODE      BIT(7)
+#define PORT_CONF_DEV5G_MODES_DEV5G_D7_MODE_SET(x)\
+       FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D7_MODE, x)
+#define PORT_CONF_DEV5G_MODES_DEV5G_D7_MODE_GET(x)\
+       FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D7_MODE, x)
+
+#define PORT_CONF_DEV5G_MODES_DEV5G_D8_MODE      BIT(8)
+#define PORT_CONF_DEV5G_MODES_DEV5G_D8_MODE_SET(x)\
+       FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D8_MODE, x)
+#define PORT_CONF_DEV5G_MODES_DEV5G_D8_MODE_GET(x)\
+       FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D8_MODE, x)
+
+#define PORT_CONF_DEV5G_MODES_DEV5G_D9_MODE      BIT(9)
+#define PORT_CONF_DEV5G_MODES_DEV5G_D9_MODE_SET(x)\
+       FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D9_MODE, x)
+#define PORT_CONF_DEV5G_MODES_DEV5G_D9_MODE_GET(x)\
+       FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D9_MODE, x)
+
+#define PORT_CONF_DEV5G_MODES_DEV5G_D10_MODE     BIT(10)
+#define PORT_CONF_DEV5G_MODES_DEV5G_D10_MODE_SET(x)\
+       FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D10_MODE, x)
+#define PORT_CONF_DEV5G_MODES_DEV5G_D10_MODE_GET(x)\
+       FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D10_MODE, x)
+
+#define PORT_CONF_DEV5G_MODES_DEV5G_D11_MODE     BIT(11)
+#define PORT_CONF_DEV5G_MODES_DEV5G_D11_MODE_SET(x)\
+       FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D11_MODE, x)
+#define PORT_CONF_DEV5G_MODES_DEV5G_D11_MODE_GET(x)\
+       FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D11_MODE, x)
+
+#define PORT_CONF_DEV5G_MODES_DEV5G_D64_MODE     BIT(12)
+#define PORT_CONF_DEV5G_MODES_DEV5G_D64_MODE_SET(x)\
+       FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D64_MODE, x)
+#define PORT_CONF_DEV5G_MODES_DEV5G_D64_MODE_GET(x)\
+       FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D64_MODE, x)
+
+/*      PORT_CONF:HW_CFG:DEV10G_MODES */
+#define PORT_CONF_DEV10G_MODES    __REG(TARGET_PORT_CONF, 0, 1, 0, 0, 1, 24, 4, 0, 1, 4)
+
+#define PORT_CONF_DEV10G_MODES_DEV10G_D12_MODE   BIT(0)
+#define PORT_CONF_DEV10G_MODES_DEV10G_D12_MODE_SET(x)\
+       FIELD_PREP(PORT_CONF_DEV10G_MODES_DEV10G_D12_MODE, x)
+#define PORT_CONF_DEV10G_MODES_DEV10G_D12_MODE_GET(x)\
+       FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D12_MODE, x)
+
+#define PORT_CONF_DEV10G_MODES_DEV10G_D13_MODE   BIT(1)
+#define PORT_CONF_DEV10G_MODES_DEV10G_D13_MODE_SET(x)\
+       FIELD_PREP(PORT_CONF_DEV10G_MODES_DEV10G_D13_MODE, x)
+#define PORT_CONF_DEV10G_MODES_DEV10G_D13_MODE_GET(x)\
+       FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D13_MODE, x)
+
+#define PORT_CONF_DEV10G_MODES_DEV10G_D14_MODE   BIT(2)
+#define PORT_CONF_DEV10G_MODES_DEV10G_D14_MODE_SET(x)\
+       FIELD_PREP(PORT_CONF_DEV10G_MODES_DEV10G_D14_MODE, x)
+#define PORT_CONF_DEV10G_MODES_DEV10G_D14_MODE_GET(x)\
+       FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D14_MODE, x)
+
+#define PORT_CONF_DEV10G_MODES_DEV10G_D15_MODE   BIT(3)
+#define PORT_CONF_DEV10G_MODES_DEV10G_D15_MODE_SET(x)\
+       FIELD_PREP(PORT_CONF_DEV10G_MODES_DEV10G_D15_MODE, x)
+#define PORT_CONF_DEV10G_MODES_DEV10G_D15_MODE_GET(x)\
+       FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D15_MODE, x)
+
+#define PORT_CONF_DEV10G_MODES_DEV10G_D48_MODE   BIT(4)
+#define PORT_CONF_DEV10G_MODES_DEV10G_D48_MODE_SET(x)\
+       FIELD_PREP(PORT_CONF_DEV10G_MODES_DEV10G_D48_MODE, x)
+#define PORT_CONF_DEV10G_MODES_DEV10G_D48_MODE_GET(x)\
+       FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D48_MODE, x)
+
+#define PORT_CONF_DEV10G_MODES_DEV10G_D49_MODE   BIT(5)
+#define PORT_CONF_DEV10G_MODES_DEV10G_D49_MODE_SET(x)\
+       FIELD_PREP(PORT_CONF_DEV10G_MODES_DEV10G_D49_MODE, x)
+#define PORT_CONF_DEV10G_MODES_DEV10G_D49_MODE_GET(x)\
+       FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D49_MODE, x)
+
+#define PORT_CONF_DEV10G_MODES_DEV10G_D50_MODE   BIT(6)
+#define PORT_CONF_DEV10G_MODES_DEV10G_D50_MODE_SET(x)\
+       FIELD_PREP(PORT_CONF_DEV10G_MODES_DEV10G_D50_MODE, x)
+#define PORT_CONF_DEV10G_MODES_DEV10G_D50_MODE_GET(x)\
+       FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D50_MODE, x)
+
+#define PORT_CONF_DEV10G_MODES_DEV10G_D51_MODE   BIT(7)
+#define PORT_CONF_DEV10G_MODES_DEV10G_D51_MODE_SET(x)\
+       FIELD_PREP(PORT_CONF_DEV10G_MODES_DEV10G_D51_MODE, x)
+#define PORT_CONF_DEV10G_MODES_DEV10G_D51_MODE_GET(x)\
+       FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D51_MODE, x)
+
+#define PORT_CONF_DEV10G_MODES_DEV10G_D52_MODE   BIT(8)
+#define PORT_CONF_DEV10G_MODES_DEV10G_D52_MODE_SET(x)\
+       FIELD_PREP(PORT_CONF_DEV10G_MODES_DEV10G_D52_MODE, x)
+#define PORT_CONF_DEV10G_MODES_DEV10G_D52_MODE_GET(x)\
+       FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D52_MODE, x)
+
+#define PORT_CONF_DEV10G_MODES_DEV10G_D53_MODE   BIT(9)
+#define PORT_CONF_DEV10G_MODES_DEV10G_D53_MODE_SET(x)\
+       FIELD_PREP(PORT_CONF_DEV10G_MODES_DEV10G_D53_MODE, x)
+#define PORT_CONF_DEV10G_MODES_DEV10G_D53_MODE_GET(x)\
+       FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D53_MODE, x)
+
+#define PORT_CONF_DEV10G_MODES_DEV10G_D54_MODE   BIT(10)
+#define PORT_CONF_DEV10G_MODES_DEV10G_D54_MODE_SET(x)\
+       FIELD_PREP(PORT_CONF_DEV10G_MODES_DEV10G_D54_MODE, x)
+#define PORT_CONF_DEV10G_MODES_DEV10G_D54_MODE_GET(x)\
+       FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D54_MODE, x)
+
+#define PORT_CONF_DEV10G_MODES_DEV10G_D55_MODE   BIT(11)
+#define PORT_CONF_DEV10G_MODES_DEV10G_D55_MODE_SET(x)\
+       FIELD_PREP(PORT_CONF_DEV10G_MODES_DEV10G_D55_MODE, x)
+#define PORT_CONF_DEV10G_MODES_DEV10G_D55_MODE_GET(x)\
+       FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D55_MODE, x)
+
+/*      PORT_CONF:HW_CFG:DEV25G_MODES */
+#define PORT_CONF_DEV25G_MODES    __REG(TARGET_PORT_CONF, 0, 1, 0, 0, 1, 24, 8, 0, 1, 4)
+
+#define PORT_CONF_DEV25G_MODES_DEV25G_D56_MODE   BIT(0)
+#define PORT_CONF_DEV25G_MODES_DEV25G_D56_MODE_SET(x)\
+       FIELD_PREP(PORT_CONF_DEV25G_MODES_DEV25G_D56_MODE, x)
+#define PORT_CONF_DEV25G_MODES_DEV25G_D56_MODE_GET(x)\
+       FIELD_GET(PORT_CONF_DEV25G_MODES_DEV25G_D56_MODE, x)
+
+#define PORT_CONF_DEV25G_MODES_DEV25G_D57_MODE   BIT(1)
+#define PORT_CONF_DEV25G_MODES_DEV25G_D57_MODE_SET(x)\
+       FIELD_PREP(PORT_CONF_DEV25G_MODES_DEV25G_D57_MODE, x)
+#define PORT_CONF_DEV25G_MODES_DEV25G_D57_MODE_GET(x)\
+       FIELD_GET(PORT_CONF_DEV25G_MODES_DEV25G_D57_MODE, x)
+
+#define PORT_CONF_DEV25G_MODES_DEV25G_D58_MODE   BIT(2)
+#define PORT_CONF_DEV25G_MODES_DEV25G_D58_MODE_SET(x)\
+       FIELD_PREP(PORT_CONF_DEV25G_MODES_DEV25G_D58_MODE, x)
+#define PORT_CONF_DEV25G_MODES_DEV25G_D58_MODE_GET(x)\
+       FIELD_GET(PORT_CONF_DEV25G_MODES_DEV25G_D58_MODE, x)
+
+#define PORT_CONF_DEV25G_MODES_DEV25G_D59_MODE   BIT(3)
+#define PORT_CONF_DEV25G_MODES_DEV25G_D59_MODE_SET(x)\
+       FIELD_PREP(PORT_CONF_DEV25G_MODES_DEV25G_D59_MODE, x)
+#define PORT_CONF_DEV25G_MODES_DEV25G_D59_MODE_GET(x)\
+       FIELD_GET(PORT_CONF_DEV25G_MODES_DEV25G_D59_MODE, x)
+
+#define PORT_CONF_DEV25G_MODES_DEV25G_D60_MODE   BIT(4)
+#define PORT_CONF_DEV25G_MODES_DEV25G_D60_MODE_SET(x)\
+       FIELD_PREP(PORT_CONF_DEV25G_MODES_DEV25G_D60_MODE, x)
+#define PORT_CONF_DEV25G_MODES_DEV25G_D60_MODE_GET(x)\
+       FIELD_GET(PORT_CONF_DEV25G_MODES_DEV25G_D60_MODE, x)
+
+#define PORT_CONF_DEV25G_MODES_DEV25G_D61_MODE   BIT(5)
+#define PORT_CONF_DEV25G_MODES_DEV25G_D61_MODE_SET(x)\
+       FIELD_PREP(PORT_CONF_DEV25G_MODES_DEV25G_D61_MODE, x)
+#define PORT_CONF_DEV25G_MODES_DEV25G_D61_MODE_GET(x)\
+       FIELD_GET(PORT_CONF_DEV25G_MODES_DEV25G_D61_MODE, x)
+
+#define PORT_CONF_DEV25G_MODES_DEV25G_D62_MODE   BIT(6)
+#define PORT_CONF_DEV25G_MODES_DEV25G_D62_MODE_SET(x)\
+       FIELD_PREP(PORT_CONF_DEV25G_MODES_DEV25G_D62_MODE, x)
+#define PORT_CONF_DEV25G_MODES_DEV25G_D62_MODE_GET(x)\
+       FIELD_GET(PORT_CONF_DEV25G_MODES_DEV25G_D62_MODE, x)
+
+#define PORT_CONF_DEV25G_MODES_DEV25G_D63_MODE   BIT(7)
+#define PORT_CONF_DEV25G_MODES_DEV25G_D63_MODE_SET(x)\
+       FIELD_PREP(PORT_CONF_DEV25G_MODES_DEV25G_D63_MODE, x)
+#define PORT_CONF_DEV25G_MODES_DEV25G_D63_MODE_GET(x)\
+       FIELD_GET(PORT_CONF_DEV25G_MODES_DEV25G_D63_MODE, x)
+
+/*      PORT_CONF:HW_CFG:QSGMII_ENA */
+#define PORT_CONF_QSGMII_ENA      __REG(TARGET_PORT_CONF, 0, 1, 0, 0, 1, 24, 12, 0, 1, 4)
+
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_0        BIT(0)
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_0_SET(x)\
+       FIELD_PREP(PORT_CONF_QSGMII_ENA_QSGMII_ENA_0, x)
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_0_GET(x)\
+       FIELD_GET(PORT_CONF_QSGMII_ENA_QSGMII_ENA_0, x)
+
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_1        BIT(1)
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_1_SET(x)\
+       FIELD_PREP(PORT_CONF_QSGMII_ENA_QSGMII_ENA_1, x)
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_1_GET(x)\
+       FIELD_GET(PORT_CONF_QSGMII_ENA_QSGMII_ENA_1, x)
+
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_2        BIT(2)
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_2_SET(x)\
+       FIELD_PREP(PORT_CONF_QSGMII_ENA_QSGMII_ENA_2, x)
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_2_GET(x)\
+       FIELD_GET(PORT_CONF_QSGMII_ENA_QSGMII_ENA_2, x)
+
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_3        BIT(3)
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_3_SET(x)\
+       FIELD_PREP(PORT_CONF_QSGMII_ENA_QSGMII_ENA_3, x)
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_3_GET(x)\
+       FIELD_GET(PORT_CONF_QSGMII_ENA_QSGMII_ENA_3, x)
+
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_4        BIT(4)
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_4_SET(x)\
+       FIELD_PREP(PORT_CONF_QSGMII_ENA_QSGMII_ENA_4, x)
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_4_GET(x)\
+       FIELD_GET(PORT_CONF_QSGMII_ENA_QSGMII_ENA_4, x)
+
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_5        BIT(5)
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_5_SET(x)\
+       FIELD_PREP(PORT_CONF_QSGMII_ENA_QSGMII_ENA_5, x)
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_5_GET(x)\
+       FIELD_GET(PORT_CONF_QSGMII_ENA_QSGMII_ENA_5, x)
+
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_6        BIT(6)
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_6_SET(x)\
+       FIELD_PREP(PORT_CONF_QSGMII_ENA_QSGMII_ENA_6, x)
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_6_GET(x)\
+       FIELD_GET(PORT_CONF_QSGMII_ENA_QSGMII_ENA_6, x)
+
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_7        BIT(7)
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_7_SET(x)\
+       FIELD_PREP(PORT_CONF_QSGMII_ENA_QSGMII_ENA_7, x)
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_7_GET(x)\
+       FIELD_GET(PORT_CONF_QSGMII_ENA_QSGMII_ENA_7, x)
+
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_8        BIT(8)
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_8_SET(x)\
+       FIELD_PREP(PORT_CONF_QSGMII_ENA_QSGMII_ENA_8, x)
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_8_GET(x)\
+       FIELD_GET(PORT_CONF_QSGMII_ENA_QSGMII_ENA_8, x)
+
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_9        BIT(9)
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_9_SET(x)\
+       FIELD_PREP(PORT_CONF_QSGMII_ENA_QSGMII_ENA_9, x)
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_9_GET(x)\
+       FIELD_GET(PORT_CONF_QSGMII_ENA_QSGMII_ENA_9, x)
+
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_10       BIT(10)
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_10_SET(x)\
+       FIELD_PREP(PORT_CONF_QSGMII_ENA_QSGMII_ENA_10, x)
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_10_GET(x)\
+       FIELD_GET(PORT_CONF_QSGMII_ENA_QSGMII_ENA_10, x)
+
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_11       BIT(11)
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_11_SET(x)\
+       FIELD_PREP(PORT_CONF_QSGMII_ENA_QSGMII_ENA_11, x)
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_11_GET(x)\
+       FIELD_GET(PORT_CONF_QSGMII_ENA_QSGMII_ENA_11, x)
+
+/*      PORT_CONF:USGMII_CFG_STAT:USGMII_CFG */
+#define PORT_CONF_USGMII_CFG(g)   __REG(TARGET_PORT_CONF, 0, 1, 72, g, 6, 8, 0, 0, 1, 4)
+
+#define PORT_CONF_USGMII_CFG_BYPASS_SCRAM        BIT(9)
+#define PORT_CONF_USGMII_CFG_BYPASS_SCRAM_SET(x)\
+       FIELD_PREP(PORT_CONF_USGMII_CFG_BYPASS_SCRAM, x)
+#define PORT_CONF_USGMII_CFG_BYPASS_SCRAM_GET(x)\
+       FIELD_GET(PORT_CONF_USGMII_CFG_BYPASS_SCRAM, x)
+
+#define PORT_CONF_USGMII_CFG_BYPASS_DESCRAM      BIT(8)
+#define PORT_CONF_USGMII_CFG_BYPASS_DESCRAM_SET(x)\
+       FIELD_PREP(PORT_CONF_USGMII_CFG_BYPASS_DESCRAM, x)
+#define PORT_CONF_USGMII_CFG_BYPASS_DESCRAM_GET(x)\
+       FIELD_GET(PORT_CONF_USGMII_CFG_BYPASS_DESCRAM, x)
+
+#define PORT_CONF_USGMII_CFG_FLIP_LANES          BIT(7)
+#define PORT_CONF_USGMII_CFG_FLIP_LANES_SET(x)\
+       FIELD_PREP(PORT_CONF_USGMII_CFG_FLIP_LANES, x)
+#define PORT_CONF_USGMII_CFG_FLIP_LANES_GET(x)\
+       FIELD_GET(PORT_CONF_USGMII_CFG_FLIP_LANES, x)
+
+#define PORT_CONF_USGMII_CFG_SHYST_DIS           BIT(6)
+#define PORT_CONF_USGMII_CFG_SHYST_DIS_SET(x)\
+       FIELD_PREP(PORT_CONF_USGMII_CFG_SHYST_DIS, x)
+#define PORT_CONF_USGMII_CFG_SHYST_DIS_GET(x)\
+       FIELD_GET(PORT_CONF_USGMII_CFG_SHYST_DIS, x)
+
+#define PORT_CONF_USGMII_CFG_E_DET_ENA           BIT(5)
+#define PORT_CONF_USGMII_CFG_E_DET_ENA_SET(x)\
+       FIELD_PREP(PORT_CONF_USGMII_CFG_E_DET_ENA, x)
+#define PORT_CONF_USGMII_CFG_E_DET_ENA_GET(x)\
+       FIELD_GET(PORT_CONF_USGMII_CFG_E_DET_ENA, x)
+
+#define PORT_CONF_USGMII_CFG_USE_I1_ENA          BIT(4)
+#define PORT_CONF_USGMII_CFG_USE_I1_ENA_SET(x)\
+       FIELD_PREP(PORT_CONF_USGMII_CFG_USE_I1_ENA, x)
+#define PORT_CONF_USGMII_CFG_USE_I1_ENA_GET(x)\
+       FIELD_GET(PORT_CONF_USGMII_CFG_USE_I1_ENA, x)
+
+#define PORT_CONF_USGMII_CFG_QUAD_MODE           BIT(1)
+#define PORT_CONF_USGMII_CFG_QUAD_MODE_SET(x)\
+       FIELD_PREP(PORT_CONF_USGMII_CFG_QUAD_MODE, x)
+#define PORT_CONF_USGMII_CFG_QUAD_MODE_GET(x)\
+       FIELD_GET(PORT_CONF_USGMII_CFG_QUAD_MODE, x)
+
+/*      QFWD:SYSTEM:SWITCH_PORT_MODE */
+#define QFWD_SWITCH_PORT_MODE(r)  __REG(TARGET_QFWD, 0, 1, 0, 0, 1, 340, 0, r, 70, 4)
+
+#define QFWD_SWITCH_PORT_MODE_PORT_ENA           BIT(19)
+#define QFWD_SWITCH_PORT_MODE_PORT_ENA_SET(x)\
+       FIELD_PREP(QFWD_SWITCH_PORT_MODE_PORT_ENA, x)
+#define QFWD_SWITCH_PORT_MODE_PORT_ENA_GET(x)\
+       FIELD_GET(QFWD_SWITCH_PORT_MODE_PORT_ENA, x)
+
+#define QFWD_SWITCH_PORT_MODE_FWD_URGENCY        GENMASK(18, 10)
+#define QFWD_SWITCH_PORT_MODE_FWD_URGENCY_SET(x)\
+       FIELD_PREP(QFWD_SWITCH_PORT_MODE_FWD_URGENCY, x)
+#define QFWD_SWITCH_PORT_MODE_FWD_URGENCY_GET(x)\
+       FIELD_GET(QFWD_SWITCH_PORT_MODE_FWD_URGENCY, x)
+
+#define QFWD_SWITCH_PORT_MODE_YEL_RSRVD          GENMASK(9, 6)
+#define QFWD_SWITCH_PORT_MODE_YEL_RSRVD_SET(x)\
+       FIELD_PREP(QFWD_SWITCH_PORT_MODE_YEL_RSRVD, x)
+#define QFWD_SWITCH_PORT_MODE_YEL_RSRVD_GET(x)\
+       FIELD_GET(QFWD_SWITCH_PORT_MODE_YEL_RSRVD, x)
+
+#define QFWD_SWITCH_PORT_MODE_INGRESS_DROP_MODE  BIT(5)
+#define QFWD_SWITCH_PORT_MODE_INGRESS_DROP_MODE_SET(x)\
+       FIELD_PREP(QFWD_SWITCH_PORT_MODE_INGRESS_DROP_MODE, x)
+#define QFWD_SWITCH_PORT_MODE_INGRESS_DROP_MODE_GET(x)\
+       FIELD_GET(QFWD_SWITCH_PORT_MODE_INGRESS_DROP_MODE, x)
+
+#define QFWD_SWITCH_PORT_MODE_IGR_NO_SHARING     BIT(4)
+#define QFWD_SWITCH_PORT_MODE_IGR_NO_SHARING_SET(x)\
+       FIELD_PREP(QFWD_SWITCH_PORT_MODE_IGR_NO_SHARING, x)
+#define QFWD_SWITCH_PORT_MODE_IGR_NO_SHARING_GET(x)\
+       FIELD_GET(QFWD_SWITCH_PORT_MODE_IGR_NO_SHARING, x)
+
+#define QFWD_SWITCH_PORT_MODE_EGR_NO_SHARING     BIT(3)
+#define QFWD_SWITCH_PORT_MODE_EGR_NO_SHARING_SET(x)\
+       FIELD_PREP(QFWD_SWITCH_PORT_MODE_EGR_NO_SHARING, x)
+#define QFWD_SWITCH_PORT_MODE_EGR_NO_SHARING_GET(x)\
+       FIELD_GET(QFWD_SWITCH_PORT_MODE_EGR_NO_SHARING, x)
+
+#define QFWD_SWITCH_PORT_MODE_EGRESS_DROP_MODE   BIT(2)
+#define QFWD_SWITCH_PORT_MODE_EGRESS_DROP_MODE_SET(x)\
+       FIELD_PREP(QFWD_SWITCH_PORT_MODE_EGRESS_DROP_MODE, x)
+#define QFWD_SWITCH_PORT_MODE_EGRESS_DROP_MODE_GET(x)\
+       FIELD_GET(QFWD_SWITCH_PORT_MODE_EGRESS_DROP_MODE, x)
+
+#define QFWD_SWITCH_PORT_MODE_EGRESS_RSRV_DIS    BIT(1)
+#define QFWD_SWITCH_PORT_MODE_EGRESS_RSRV_DIS_SET(x)\
+       FIELD_PREP(QFWD_SWITCH_PORT_MODE_EGRESS_RSRV_DIS, x)
+#define QFWD_SWITCH_PORT_MODE_EGRESS_RSRV_DIS_GET(x)\
+       FIELD_GET(QFWD_SWITCH_PORT_MODE_EGRESS_RSRV_DIS, x)
+
+#define QFWD_SWITCH_PORT_MODE_LEARNALL_MORE      BIT(0)
+#define QFWD_SWITCH_PORT_MODE_LEARNALL_MORE_SET(x)\
+       FIELD_PREP(QFWD_SWITCH_PORT_MODE_LEARNALL_MORE, x)
+#define QFWD_SWITCH_PORT_MODE_LEARNALL_MORE_GET(x)\
+       FIELD_GET(QFWD_SWITCH_PORT_MODE_LEARNALL_MORE, x)
+
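+/* Illustrative sketch: enabling switching on a port would update only the
+ * PORT_ENA field of this register, along the lines of:
+ *
+ *     spx5_rmw(QFWD_SWITCH_PORT_MODE_PORT_ENA_SET(1),
+ *              QFWD_SWITCH_PORT_MODE_PORT_ENA,
+ *              sparx5, QFWD_SWITCH_PORT_MODE(portno));
+ *
+ * spx5_rmw() (a read-modify-write accessor) is assumed from elsewhere in
+ * the driver; "sparx5" and "portno" are caller-provided.
+ */
+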
+/*      QRES:RES_CTRL:RES_CFG */
+#define QRES_RES_CFG(g)           __REG(TARGET_QRES, 0, 1, 0, g, 5120, 16, 0, 0, 1, 4)
+
+#define QRES_RES_CFG_WM_HIGH                     GENMASK(11, 0)
+#define QRES_RES_CFG_WM_HIGH_SET(x)\
+       FIELD_PREP(QRES_RES_CFG_WM_HIGH, x)
+#define QRES_RES_CFG_WM_HIGH_GET(x)\
+       FIELD_GET(QRES_RES_CFG_WM_HIGH, x)
+
+/*      QRES:RES_CTRL:RES_STAT */
+#define QRES_RES_STAT(g)          __REG(TARGET_QRES, 0, 1, 0, g, 5120, 16, 4, 0, 1, 4)
+
+#define QRES_RES_STAT_MAXUSE                     GENMASK(20, 0)
+#define QRES_RES_STAT_MAXUSE_SET(x)\
+       FIELD_PREP(QRES_RES_STAT_MAXUSE, x)
+#define QRES_RES_STAT_MAXUSE_GET(x)\
+       FIELD_GET(QRES_RES_STAT_MAXUSE, x)
+
+/*      QRES:RES_CTRL:RES_STAT_CUR */
+#define QRES_RES_STAT_CUR(g)      __REG(TARGET_QRES, 0, 1, 0, g, 5120, 16, 8, 0, 1, 4)
+
+#define QRES_RES_STAT_CUR_INUSE                  GENMASK(20, 0)
+#define QRES_RES_STAT_CUR_INUSE_SET(x)\
+       FIELD_PREP(QRES_RES_STAT_CUR_INUSE, x)
+#define QRES_RES_STAT_CUR_INUSE_GET(x)\
+       FIELD_GET(QRES_RES_STAT_CUR_INUSE, x)
+
+/*      DEVCPU_QS:XTR:XTR_GRP_CFG */
+#define QS_XTR_GRP_CFG(r)         __REG(TARGET_QS, 0, 1, 0, 0, 1, 36, 0, r, 2, 4)
+
+#define QS_XTR_GRP_CFG_MODE                      GENMASK(3, 2)
+#define QS_XTR_GRP_CFG_MODE_SET(x)\
+       FIELD_PREP(QS_XTR_GRP_CFG_MODE, x)
+#define QS_XTR_GRP_CFG_MODE_GET(x)\
+       FIELD_GET(QS_XTR_GRP_CFG_MODE, x)
+
+#define QS_XTR_GRP_CFG_STATUS_WORD_POS           BIT(1)
+#define QS_XTR_GRP_CFG_STATUS_WORD_POS_SET(x)\
+       FIELD_PREP(QS_XTR_GRP_CFG_STATUS_WORD_POS, x)
+#define QS_XTR_GRP_CFG_STATUS_WORD_POS_GET(x)\
+       FIELD_GET(QS_XTR_GRP_CFG_STATUS_WORD_POS, x)
+
+#define QS_XTR_GRP_CFG_BYTE_SWAP                 BIT(0)
+#define QS_XTR_GRP_CFG_BYTE_SWAP_SET(x)\
+       FIELD_PREP(QS_XTR_GRP_CFG_BYTE_SWAP, x)
+#define QS_XTR_GRP_CFG_BYTE_SWAP_GET(x)\
+       FIELD_GET(QS_XTR_GRP_CFG_BYTE_SWAP, x)
+
+/*      DEVCPU_QS:XTR:XTR_RD */
+#define QS_XTR_RD(r)              __REG(TARGET_QS, 0, 1, 0, 0, 1, 36, 8, r, 2, 4)
+
+/*      DEVCPU_QS:XTR:XTR_FLUSH */
+#define QS_XTR_FLUSH              __REG(TARGET_QS, 0, 1, 0, 0, 1, 36, 24, 0, 1, 4)
+
+#define QS_XTR_FLUSH_FLUSH                       GENMASK(1, 0)
+#define QS_XTR_FLUSH_FLUSH_SET(x)\
+       FIELD_PREP(QS_XTR_FLUSH_FLUSH, x)
+#define QS_XTR_FLUSH_FLUSH_GET(x)\
+       FIELD_GET(QS_XTR_FLUSH_FLUSH, x)
+
+/*      DEVCPU_QS:XTR:XTR_DATA_PRESENT */
+#define QS_XTR_DATA_PRESENT       __REG(TARGET_QS, 0, 1, 0, 0, 1, 36, 28, 0, 1, 4)
+
+#define QS_XTR_DATA_PRESENT_DATA_PRESENT         GENMASK(1, 0)
+#define QS_XTR_DATA_PRESENT_DATA_PRESENT_SET(x)\
+       FIELD_PREP(QS_XTR_DATA_PRESENT_DATA_PRESENT, x)
+#define QS_XTR_DATA_PRESENT_DATA_PRESENT_GET(x)\
+       FIELD_GET(QS_XTR_DATA_PRESENT_DATA_PRESENT, x)
+
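+/* Illustrative sketch: before reading XTR_RD, a CPU extraction loop would
+ * typically poll for pending data in one of the two extraction groups:
+ *
+ *     u32 val = spx5_rd(sparx5, QS_XTR_DATA_PRESENT);
+ *
+ *     if (QS_XTR_DATA_PRESENT_DATA_PRESENT_GET(val) & BIT(grp))
+ *             frame_pending = true;
+ *
+ * One bit per extraction group; spx5_rd(), "grp" and "frame_pending" are
+ * example names assumed for the sketch, not definitions from this header.
+ */
+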
+/*      DEVCPU_QS:INJ:INJ_GRP_CFG */
+#define QS_INJ_GRP_CFG(r)         __REG(TARGET_QS, 0, 1, 36, 0, 1, 40, 0, r, 2, 4)
+
+#define QS_INJ_GRP_CFG_MODE                      GENMASK(3, 2)
+#define QS_INJ_GRP_CFG_MODE_SET(x)\
+       FIELD_PREP(QS_INJ_GRP_CFG_MODE, x)
+#define QS_INJ_GRP_CFG_MODE_GET(x)\
+       FIELD_GET(QS_INJ_GRP_CFG_MODE, x)
+
+#define QS_INJ_GRP_CFG_BYTE_SWAP                 BIT(0)
+#define QS_INJ_GRP_CFG_BYTE_SWAP_SET(x)\
+       FIELD_PREP(QS_INJ_GRP_CFG_BYTE_SWAP, x)
+#define QS_INJ_GRP_CFG_BYTE_SWAP_GET(x)\
+       FIELD_GET(QS_INJ_GRP_CFG_BYTE_SWAP, x)
+
+/*      DEVCPU_QS:INJ:INJ_WR */
+#define QS_INJ_WR(r)              __REG(TARGET_QS, 0, 1, 36, 0, 1, 40, 8, r, 2, 4)
+
+/*      DEVCPU_QS:INJ:INJ_CTRL */
+#define QS_INJ_CTRL(r)            __REG(TARGET_QS, 0, 1, 36, 0, 1, 40, 16, r, 2, 4)
+
+#define QS_INJ_CTRL_GAP_SIZE                     GENMASK(24, 21)
+#define QS_INJ_CTRL_GAP_SIZE_SET(x)\
+       FIELD_PREP(QS_INJ_CTRL_GAP_SIZE, x)
+#define QS_INJ_CTRL_GAP_SIZE_GET(x)\
+       FIELD_GET(QS_INJ_CTRL_GAP_SIZE, x)
+
+#define QS_INJ_CTRL_ABORT                        BIT(20)
+#define QS_INJ_CTRL_ABORT_SET(x)\
+       FIELD_PREP(QS_INJ_CTRL_ABORT, x)
+#define QS_INJ_CTRL_ABORT_GET(x)\
+       FIELD_GET(QS_INJ_CTRL_ABORT, x)
+
+#define QS_INJ_CTRL_EOF                          BIT(19)
+#define QS_INJ_CTRL_EOF_SET(x)\
+       FIELD_PREP(QS_INJ_CTRL_EOF, x)
+#define QS_INJ_CTRL_EOF_GET(x)\
+       FIELD_GET(QS_INJ_CTRL_EOF, x)
+
+#define QS_INJ_CTRL_SOF                          BIT(18)
+#define QS_INJ_CTRL_SOF_SET(x)\
+       FIELD_PREP(QS_INJ_CTRL_SOF, x)
+#define QS_INJ_CTRL_SOF_GET(x)\
+       FIELD_GET(QS_INJ_CTRL_SOF, x)
+
+#define QS_INJ_CTRL_VLD_BYTES                    GENMASK(17, 16)
+#define QS_INJ_CTRL_VLD_BYTES_SET(x)\
+       FIELD_PREP(QS_INJ_CTRL_VLD_BYTES, x)
+#define QS_INJ_CTRL_VLD_BYTES_GET(x)\
+       FIELD_GET(QS_INJ_CTRL_VLD_BYTES, x)
+
+/*      DEVCPU_QS:INJ:INJ_STATUS */
+#define QS_INJ_STATUS             __REG(TARGET_QS, 0, 1, 36, 0, 1, 40, 24, 0, 1, 4)
+
+#define QS_INJ_STATUS_WMARK_REACHED              GENMASK(5, 4)
+#define QS_INJ_STATUS_WMARK_REACHED_SET(x)\
+       FIELD_PREP(QS_INJ_STATUS_WMARK_REACHED, x)
+#define QS_INJ_STATUS_WMARK_REACHED_GET(x)\
+       FIELD_GET(QS_INJ_STATUS_WMARK_REACHED, x)
+
+#define QS_INJ_STATUS_FIFO_RDY                   GENMASK(3, 2)
+#define QS_INJ_STATUS_FIFO_RDY_SET(x)\
+       FIELD_PREP(QS_INJ_STATUS_FIFO_RDY, x)
+#define QS_INJ_STATUS_FIFO_RDY_GET(x)\
+       FIELD_GET(QS_INJ_STATUS_FIFO_RDY, x)
+
+#define QS_INJ_STATUS_INJ_IN_PROGRESS            GENMASK(1, 0)
+#define QS_INJ_STATUS_INJ_IN_PROGRESS_SET(x)\
+       FIELD_PREP(QS_INJ_STATUS_INJ_IN_PROGRESS, x)
+#define QS_INJ_STATUS_INJ_IN_PROGRESS_GET(x)\
+       FIELD_GET(QS_INJ_STATUS_INJ_IN_PROGRESS, x)
+
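+/* Illustrative sketch: an injection path would check FIFO_RDY for its
+ * group before writing frame words to INJ_WR, e.g.:
+ *
+ *     u32 val = spx5_rd(sparx5, QS_INJ_STATUS);
+ *
+ *     if (!(QS_INJ_STATUS_FIFO_RDY_GET(val) & BIT(grp)))
+ *             return -EBUSY;
+ *
+ * As above, spx5_rd() and the group index "grp" are assumed from the
+ * surrounding driver code.
+ */
+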
+/*      QSYS:PAUSE_CFG:PAUSE_CFG */
+#define QSYS_PAUSE_CFG(r)         __REG(TARGET_QSYS, 0, 1, 544, 0, 1, 1128, 0, r, 70, 4)
+
+#define QSYS_PAUSE_CFG_PAUSE_START               GENMASK(25, 14)
+#define QSYS_PAUSE_CFG_PAUSE_START_SET(x)\
+       FIELD_PREP(QSYS_PAUSE_CFG_PAUSE_START, x)
+#define QSYS_PAUSE_CFG_PAUSE_START_GET(x)\
+       FIELD_GET(QSYS_PAUSE_CFG_PAUSE_START, x)
+
+#define QSYS_PAUSE_CFG_PAUSE_STOP                GENMASK(13, 2)
+#define QSYS_PAUSE_CFG_PAUSE_STOP_SET(x)\
+       FIELD_PREP(QSYS_PAUSE_CFG_PAUSE_STOP, x)
+#define QSYS_PAUSE_CFG_PAUSE_STOP_GET(x)\
+       FIELD_GET(QSYS_PAUSE_CFG_PAUSE_STOP, x)
+
+#define QSYS_PAUSE_CFG_PAUSE_ENA                 BIT(1)
+#define QSYS_PAUSE_CFG_PAUSE_ENA_SET(x)\
+       FIELD_PREP(QSYS_PAUSE_CFG_PAUSE_ENA, x)
+#define QSYS_PAUSE_CFG_PAUSE_ENA_GET(x)\
+       FIELD_GET(QSYS_PAUSE_CFG_PAUSE_ENA, x)
+
+#define QSYS_PAUSE_CFG_AGGRESSIVE_TAILDROP_ENA   BIT(0)
+#define QSYS_PAUSE_CFG_AGGRESSIVE_TAILDROP_ENA_SET(x)\
+       FIELD_PREP(QSYS_PAUSE_CFG_AGGRESSIVE_TAILDROP_ENA, x)
+#define QSYS_PAUSE_CFG_AGGRESSIVE_TAILDROP_ENA_GET(x)\
+       FIELD_GET(QSYS_PAUSE_CFG_AGGRESSIVE_TAILDROP_ENA, x)
+
+/*      QSYS:PAUSE_CFG:ATOP */
+#define QSYS_ATOP(r)              __REG(TARGET_QSYS, 0, 1, 544, 0, 1, 1128, 284, r, 70, 4)
+
+#define QSYS_ATOP_ATOP                           GENMASK(11, 0)
+#define QSYS_ATOP_ATOP_SET(x)\
+       FIELD_PREP(QSYS_ATOP_ATOP, x)
+#define QSYS_ATOP_ATOP_GET(x)\
+       FIELD_GET(QSYS_ATOP_ATOP, x)
+
+/*      QSYS:PAUSE_CFG:FWD_PRESSURE */
+#define QSYS_FWD_PRESSURE(r)      __REG(TARGET_QSYS, 0, 1, 544, 0, 1, 1128, 564, r, 70, 4)
+
+#define QSYS_FWD_PRESSURE_FWD_PRESSURE           GENMASK(11, 1)
+#define QSYS_FWD_PRESSURE_FWD_PRESSURE_SET(x)\
+       FIELD_PREP(QSYS_FWD_PRESSURE_FWD_PRESSURE, x)
+#define QSYS_FWD_PRESSURE_FWD_PRESSURE_GET(x)\
+       FIELD_GET(QSYS_FWD_PRESSURE_FWD_PRESSURE, x)
+
+#define QSYS_FWD_PRESSURE_FWD_PRESSURE_DIS       BIT(0)
+#define QSYS_FWD_PRESSURE_FWD_PRESSURE_DIS_SET(x)\
+       FIELD_PREP(QSYS_FWD_PRESSURE_FWD_PRESSURE_DIS, x)
+#define QSYS_FWD_PRESSURE_FWD_PRESSURE_DIS_GET(x)\
+       FIELD_GET(QSYS_FWD_PRESSURE_FWD_PRESSURE_DIS, x)
+
+/*      QSYS:PAUSE_CFG:ATOP_TOT_CFG */
+#define QSYS_ATOP_TOT_CFG         __REG(TARGET_QSYS, 0, 1, 544, 0, 1, 1128, 844, 0, 1, 4)
+
+#define QSYS_ATOP_TOT_CFG_ATOP_TOT               GENMASK(11, 0)
+#define QSYS_ATOP_TOT_CFG_ATOP_TOT_SET(x)\
+       FIELD_PREP(QSYS_ATOP_TOT_CFG_ATOP_TOT, x)
+#define QSYS_ATOP_TOT_CFG_ATOP_TOT_GET(x)\
+       FIELD_GET(QSYS_ATOP_TOT_CFG_ATOP_TOT, x)
+
+/*      QSYS:CALCFG:CAL_AUTO */
+#define QSYS_CAL_AUTO(r)          __REG(TARGET_QSYS, 0, 1, 2304, 0, 1, 40, 0, r, 7, 4)
+
+#define QSYS_CAL_AUTO_CAL_AUTO                   GENMASK(29, 0)
+#define QSYS_CAL_AUTO_CAL_AUTO_SET(x)\
+       FIELD_PREP(QSYS_CAL_AUTO_CAL_AUTO, x)
+#define QSYS_CAL_AUTO_CAL_AUTO_GET(x)\
+       FIELD_GET(QSYS_CAL_AUTO_CAL_AUTO, x)
+
+/*      QSYS:CALCFG:CAL_CTRL */
+#define QSYS_CAL_CTRL             __REG(TARGET_QSYS, 0, 1, 2304, 0, 1, 40, 36, 0, 1, 4)
+
+#define QSYS_CAL_CTRL_CAL_MODE                   GENMASK(14, 11)
+#define QSYS_CAL_CTRL_CAL_MODE_SET(x)\
+       FIELD_PREP(QSYS_CAL_CTRL_CAL_MODE, x)
+#define QSYS_CAL_CTRL_CAL_MODE_GET(x)\
+       FIELD_GET(QSYS_CAL_CTRL_CAL_MODE, x)
+
+#define QSYS_CAL_CTRL_CAL_AUTO_GRANT_RATE        GENMASK(10, 1)
+#define QSYS_CAL_CTRL_CAL_AUTO_GRANT_RATE_SET(x)\
+       FIELD_PREP(QSYS_CAL_CTRL_CAL_AUTO_GRANT_RATE, x)
+#define QSYS_CAL_CTRL_CAL_AUTO_GRANT_RATE_GET(x)\
+       FIELD_GET(QSYS_CAL_CTRL_CAL_AUTO_GRANT_RATE, x)
+
+#define QSYS_CAL_CTRL_CAL_AUTO_ERROR             BIT(0)
+#define QSYS_CAL_CTRL_CAL_AUTO_ERROR_SET(x)\
+       FIELD_PREP(QSYS_CAL_CTRL_CAL_AUTO_ERROR, x)
+#define QSYS_CAL_CTRL_CAL_AUTO_ERROR_GET(x)\
+       FIELD_GET(QSYS_CAL_CTRL_CAL_AUTO_ERROR, x)
+
+/*      QSYS:RAM_CTRL:RAM_INIT */
+#define QSYS_RAM_INIT             __REG(TARGET_QSYS, 0, 1, 2344, 0, 1, 4, 0, 0, 1, 4)
+
+#define QSYS_RAM_INIT_RAM_INIT                   BIT(1)
+#define QSYS_RAM_INIT_RAM_INIT_SET(x)\
+       FIELD_PREP(QSYS_RAM_INIT_RAM_INIT, x)
+#define QSYS_RAM_INIT_RAM_INIT_GET(x)\
+       FIELD_GET(QSYS_RAM_INIT_RAM_INIT, x)
+
+#define QSYS_RAM_INIT_RAM_CFG_HOOK               BIT(0)
+#define QSYS_RAM_INIT_RAM_CFG_HOOK_SET(x)\
+       FIELD_PREP(QSYS_RAM_INIT_RAM_CFG_HOOK, x)
+#define QSYS_RAM_INIT_RAM_CFG_HOOK_GET(x)\
+       FIELD_GET(QSYS_RAM_INIT_RAM_CFG_HOOK, x)
+
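+/* Illustrative sketch: RAM initialization is presumably kicked off by
+ * writing the two bits above and then polling RAM_INIT until the
+ * hardware clears it:
+ *
+ *     spx5_wr(QSYS_RAM_INIT_RAM_INIT_SET(1) |
+ *             QSYS_RAM_INIT_RAM_CFG_HOOK_SET(1), sparx5, QSYS_RAM_INIT);
+ *     while (QSYS_RAM_INIT_RAM_INIT_GET(spx5_rd(sparx5, QSYS_RAM_INIT)))
+ *             ;
+ *
+ * The exact handshake and timeout handling are assumptions here; the
+ * driver's RAM init helper is authoritative.
+ */
+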
+/*      REW:COMMON:OWN_UPSID */
+#define REW_OWN_UPSID(r)          __REG(TARGET_REW, 0, 1, 387264, 0, 1, 1232, 0, r, 3, 4)
+
+#define REW_OWN_UPSID_OWN_UPSID                  GENMASK(4, 0)
+#define REW_OWN_UPSID_OWN_UPSID_SET(x)\
+       FIELD_PREP(REW_OWN_UPSID_OWN_UPSID, x)
+#define REW_OWN_UPSID_OWN_UPSID_GET(x)\
+       FIELD_GET(REW_OWN_UPSID_OWN_UPSID, x)
+
+/*      REW:PORT:PORT_VLAN_CFG */
+#define REW_PORT_VLAN_CFG(g)      __REG(TARGET_REW, 0, 1, 360448, g, 70, 256, 0, 0, 1, 4)
+
+#define REW_PORT_VLAN_CFG_PORT_PCP               GENMASK(15, 13)
+#define REW_PORT_VLAN_CFG_PORT_PCP_SET(x)\
+       FIELD_PREP(REW_PORT_VLAN_CFG_PORT_PCP, x)
+#define REW_PORT_VLAN_CFG_PORT_PCP_GET(x)\
+       FIELD_GET(REW_PORT_VLAN_CFG_PORT_PCP, x)
+
+#define REW_PORT_VLAN_CFG_PORT_DEI               BIT(12)
+#define REW_PORT_VLAN_CFG_PORT_DEI_SET(x)\
+       FIELD_PREP(REW_PORT_VLAN_CFG_PORT_DEI, x)
+#define REW_PORT_VLAN_CFG_PORT_DEI_GET(x)\
+       FIELD_GET(REW_PORT_VLAN_CFG_PORT_DEI, x)
+
+#define REW_PORT_VLAN_CFG_PORT_VID               GENMASK(11, 0)
+#define REW_PORT_VLAN_CFG_PORT_VID_SET(x)\
+       FIELD_PREP(REW_PORT_VLAN_CFG_PORT_VID, x)
+#define REW_PORT_VLAN_CFG_PORT_VID_GET(x)\
+       FIELD_GET(REW_PORT_VLAN_CFG_PORT_VID, x)
+
+/*      REW:PORT:TAG_CTRL */
+#define REW_TAG_CTRL(g)           __REG(TARGET_REW, 0, 1, 360448, g, 70, 256, 132, 0, 1, 4)
+
+#define REW_TAG_CTRL_TAG_CFG_OBEY_WAS_TAGGED     BIT(13)
+#define REW_TAG_CTRL_TAG_CFG_OBEY_WAS_TAGGED_SET(x)\
+       FIELD_PREP(REW_TAG_CTRL_TAG_CFG_OBEY_WAS_TAGGED, x)
+#define REW_TAG_CTRL_TAG_CFG_OBEY_WAS_TAGGED_GET(x)\
+       FIELD_GET(REW_TAG_CTRL_TAG_CFG_OBEY_WAS_TAGGED, x)
+
+#define REW_TAG_CTRL_TAG_CFG                     GENMASK(12, 11)
+#define REW_TAG_CTRL_TAG_CFG_SET(x)\
+       FIELD_PREP(REW_TAG_CTRL_TAG_CFG, x)
+#define REW_TAG_CTRL_TAG_CFG_GET(x)\
+       FIELD_GET(REW_TAG_CTRL_TAG_CFG, x)
+
+#define REW_TAG_CTRL_TAG_TPID_CFG                GENMASK(10, 8)
+#define REW_TAG_CTRL_TAG_TPID_CFG_SET(x)\
+       FIELD_PREP(REW_TAG_CTRL_TAG_TPID_CFG, x)
+#define REW_TAG_CTRL_TAG_TPID_CFG_GET(x)\
+       FIELD_GET(REW_TAG_CTRL_TAG_TPID_CFG, x)
+
+#define REW_TAG_CTRL_TAG_VID_CFG                 GENMASK(7, 6)
+#define REW_TAG_CTRL_TAG_VID_CFG_SET(x)\
+       FIELD_PREP(REW_TAG_CTRL_TAG_VID_CFG, x)
+#define REW_TAG_CTRL_TAG_VID_CFG_GET(x)\
+       FIELD_GET(REW_TAG_CTRL_TAG_VID_CFG, x)
+
+#define REW_TAG_CTRL_TAG_PCP_CFG                 GENMASK(5, 3)
+#define REW_TAG_CTRL_TAG_PCP_CFG_SET(x)\
+       FIELD_PREP(REW_TAG_CTRL_TAG_PCP_CFG, x)
+#define REW_TAG_CTRL_TAG_PCP_CFG_GET(x)\
+       FIELD_GET(REW_TAG_CTRL_TAG_PCP_CFG, x)
+
+#define REW_TAG_CTRL_TAG_DEI_CFG                 GENMASK(2, 0)
+#define REW_TAG_CTRL_TAG_DEI_CFG_SET(x)\
+       FIELD_PREP(REW_TAG_CTRL_TAG_DEI_CFG, x)
+#define REW_TAG_CTRL_TAG_DEI_CFG_GET(x)\
+       FIELD_GET(REW_TAG_CTRL_TAG_DEI_CFG, x)
+
+/*      REW:RAM_CTRL:RAM_INIT */
+#define REW_RAM_INIT              __REG(TARGET_REW, 0, 1, 378696, 0, 1, 4, 0, 0, 1, 4)
+
+#define REW_RAM_INIT_RAM_INIT                    BIT(1)
+#define REW_RAM_INIT_RAM_INIT_SET(x)\
+       FIELD_PREP(REW_RAM_INIT_RAM_INIT, x)
+#define REW_RAM_INIT_RAM_INIT_GET(x)\
+       FIELD_GET(REW_RAM_INIT_RAM_INIT, x)
+
+#define REW_RAM_INIT_RAM_CFG_HOOK                BIT(0)
+#define REW_RAM_INIT_RAM_CFG_HOOK_SET(x)\
+       FIELD_PREP(REW_RAM_INIT_RAM_CFG_HOOK, x)
+#define REW_RAM_INIT_RAM_CFG_HOOK_GET(x)\
+       FIELD_GET(REW_RAM_INIT_RAM_CFG_HOOK, x)
+
+/*      VCAP_SUPER:RAM_CTRL:RAM_INIT */
+#define VCAP_SUPER_RAM_INIT       __REG(TARGET_VCAP_SUPER, 0, 1, 1120, 0, 1, 4, 0, 0, 1, 4)
+
+#define VCAP_SUPER_RAM_INIT_RAM_INIT             BIT(1)
+#define VCAP_SUPER_RAM_INIT_RAM_INIT_SET(x)\
+       FIELD_PREP(VCAP_SUPER_RAM_INIT_RAM_INIT, x)
+#define VCAP_SUPER_RAM_INIT_RAM_INIT_GET(x)\
+       FIELD_GET(VCAP_SUPER_RAM_INIT_RAM_INIT, x)
+
+#define VCAP_SUPER_RAM_INIT_RAM_CFG_HOOK         BIT(0)
+#define VCAP_SUPER_RAM_INIT_RAM_CFG_HOOK_SET(x)\
+       FIELD_PREP(VCAP_SUPER_RAM_INIT_RAM_CFG_HOOK, x)
+#define VCAP_SUPER_RAM_INIT_RAM_CFG_HOOK_GET(x)\
+       FIELD_GET(VCAP_SUPER_RAM_INIT_RAM_CFG_HOOK, x)
+
+/*      VOP:RAM_CTRL:RAM_INIT */
+#define VOP_RAM_INIT              __REG(TARGET_VOP, 0, 1, 279176, 0, 1, 4, 0, 0, 1, 4)
+
+#define VOP_RAM_INIT_RAM_INIT                    BIT(1)
+#define VOP_RAM_INIT_RAM_INIT_SET(x)\
+       FIELD_PREP(VOP_RAM_INIT_RAM_INIT, x)
+#define VOP_RAM_INIT_RAM_INIT_GET(x)\
+       FIELD_GET(VOP_RAM_INIT_RAM_INIT, x)
+
+#define VOP_RAM_INIT_RAM_CFG_HOOK                BIT(0)
+#define VOP_RAM_INIT_RAM_CFG_HOOK_SET(x)\
+       FIELD_PREP(VOP_RAM_INIT_RAM_CFG_HOOK, x)
+#define VOP_RAM_INIT_RAM_CFG_HOOK_GET(x)\
+       FIELD_GET(VOP_RAM_INIT_RAM_CFG_HOOK, x)
+
+/*      XQS:SYSTEM:STAT_CFG */
+#define XQS_STAT_CFG              __REG(TARGET_XQS, 0, 1, 6768, 0, 1, 872, 860, 0, 1, 4)
+
+#define XQS_STAT_CFG_STAT_CLEAR_SHOT             GENMASK(21, 18)
+#define XQS_STAT_CFG_STAT_CLEAR_SHOT_SET(x)\
+       FIELD_PREP(XQS_STAT_CFG_STAT_CLEAR_SHOT, x)
+#define XQS_STAT_CFG_STAT_CLEAR_SHOT_GET(x)\
+       FIELD_GET(XQS_STAT_CFG_STAT_CLEAR_SHOT, x)
+
+#define XQS_STAT_CFG_STAT_VIEW                   GENMASK(17, 5)
+#define XQS_STAT_CFG_STAT_VIEW_SET(x)\
+       FIELD_PREP(XQS_STAT_CFG_STAT_VIEW, x)
+#define XQS_STAT_CFG_STAT_VIEW_GET(x)\
+       FIELD_GET(XQS_STAT_CFG_STAT_VIEW, x)
+
+#define XQS_STAT_CFG_STAT_SRV_PKT_ONLY           BIT(4)
+#define XQS_STAT_CFG_STAT_SRV_PKT_ONLY_SET(x)\
+       FIELD_PREP(XQS_STAT_CFG_STAT_SRV_PKT_ONLY, x)
+#define XQS_STAT_CFG_STAT_SRV_PKT_ONLY_GET(x)\
+       FIELD_GET(XQS_STAT_CFG_STAT_SRV_PKT_ONLY, x)
+
+#define XQS_STAT_CFG_STAT_WRAP_DIS               GENMASK(3, 0)
+#define XQS_STAT_CFG_STAT_WRAP_DIS_SET(x)\
+       FIELD_PREP(XQS_STAT_CFG_STAT_WRAP_DIS, x)
+#define XQS_STAT_CFG_STAT_WRAP_DIS_GET(x)\
+       FIELD_GET(XQS_STAT_CFG_STAT_WRAP_DIS, x)
+
+/*      XQS:QLIMIT_SHR:QLIMIT_SHR_TOP_CFG */
+#define XQS_QLIMIT_SHR_TOP_CFG(g) __REG(TARGET_XQS, 0, 1, 7936, g, 4, 48, 0, 0, 1, 4)
+
+#define XQS_QLIMIT_SHR_TOP_CFG_QLIMIT_SHR_TOP    GENMASK(14, 0)
+#define XQS_QLIMIT_SHR_TOP_CFG_QLIMIT_SHR_TOP_SET(x)\
+       FIELD_PREP(XQS_QLIMIT_SHR_TOP_CFG_QLIMIT_SHR_TOP, x)
+#define XQS_QLIMIT_SHR_TOP_CFG_QLIMIT_SHR_TOP_GET(x)\
+       FIELD_GET(XQS_QLIMIT_SHR_TOP_CFG_QLIMIT_SHR_TOP, x)
+
+/*      XQS:QLIMIT_SHR:QLIMIT_SHR_ATOP_CFG */
+#define XQS_QLIMIT_SHR_ATOP_CFG(g) __REG(TARGET_XQS, 0, 1, 7936, g, 4, 48, 4, 0, 1, 4)
+
+#define XQS_QLIMIT_SHR_ATOP_CFG_QLIMIT_SHR_ATOP  GENMASK(14, 0)
+#define XQS_QLIMIT_SHR_ATOP_CFG_QLIMIT_SHR_ATOP_SET(x)\
+       FIELD_PREP(XQS_QLIMIT_SHR_ATOP_CFG_QLIMIT_SHR_ATOP, x)
+#define XQS_QLIMIT_SHR_ATOP_CFG_QLIMIT_SHR_ATOP_GET(x)\
+       FIELD_GET(XQS_QLIMIT_SHR_ATOP_CFG_QLIMIT_SHR_ATOP, x)
+
+/*      XQS:QLIMIT_SHR:QLIMIT_SHR_CTOP_CFG */
+#define XQS_QLIMIT_SHR_CTOP_CFG(g) __REG(TARGET_XQS, 0, 1, 7936, g, 4, 48, 8, 0, 1, 4)
+
+#define XQS_QLIMIT_SHR_CTOP_CFG_QLIMIT_SHR_CTOP  GENMASK(14, 0)
+#define XQS_QLIMIT_SHR_CTOP_CFG_QLIMIT_SHR_CTOP_SET(x)\
+       FIELD_PREP(XQS_QLIMIT_SHR_CTOP_CFG_QLIMIT_SHR_CTOP, x)
+#define XQS_QLIMIT_SHR_CTOP_CFG_QLIMIT_SHR_CTOP_GET(x)\
+       FIELD_GET(XQS_QLIMIT_SHR_CTOP_CFG_QLIMIT_SHR_CTOP, x)
+
+/*      XQS:QLIMIT_SHR:QLIMIT_SHR_QLIM_CFG */
+#define XQS_QLIMIT_SHR_QLIM_CFG(g) __REG(TARGET_XQS, 0, 1, 7936, g, 4, 48, 12, 0, 1, 4)
+
+#define XQS_QLIMIT_SHR_QLIM_CFG_QLIMIT_SHR_QLIM  GENMASK(14, 0)
+#define XQS_QLIMIT_SHR_QLIM_CFG_QLIMIT_SHR_QLIM_SET(x)\
+       FIELD_PREP(XQS_QLIMIT_SHR_QLIM_CFG_QLIMIT_SHR_QLIM, x)
+#define XQS_QLIMIT_SHR_QLIM_CFG_QLIMIT_SHR_QLIM_GET(x)\
+       FIELD_GET(XQS_QLIMIT_SHR_QLIM_CFG_QLIMIT_SHR_QLIM, x)
+
+/*      XQS:STAT:CNT */
+#define XQS_CNT(g)                __REG(TARGET_XQS, 0, 1, 0, g, 1024, 4, 0, 0, 1, 4)
+
+#endif /* _SPARX5_MAIN_REGS_H_ */
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c b/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c
new file mode 100644 (file)
index 0000000..9d485a9
--- /dev/null
@@ -0,0 +1,264 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip Sparx5 Switch driver
+ *
+ * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#include "sparx5_main_regs.h"
+#include "sparx5_main.h"
+#include "sparx5_port.h"
+
+/* The IFH bit position of the first VSTAX bit. This offset is needed
+ * because the VSTAX bit positions in the data sheet start from zero.
+ */
+#define VSTAX 73
+
+static void ifh_encode_bitfield(void *ifh, u64 value, u32 pos, u32 width)
+{
+       u8 *ifh_hdr = ifh;
+       /* Calculate the starting IFH byte position of this bit position */
+       u32 byte = (35 - (pos / 8));
+       /* Calculate the starting bit position within that byte */
+       u32 bit  = (pos % 8);
+       u64 encode = GENMASK(bit + width - 1, bit) & (value << bit);
+
+       /* The max width is 5 bytes (40 bits). In the worst case this
+        * spreads over 6 bytes (48 bits).
+        */
+       compiletime_assert(width <= 40, "Unsupported width, must be <= 40");
+
+       /* Bits b0-b7 go into the start IFH byte */
+       if (encode & 0xFF)
+               ifh_hdr[byte] |= (u8)((encode & 0xFF));
+       /* Bits b8-b15 go into the next IFH byte */
+       if (encode & 0xFF00)
+               ifh_hdr[byte - 1] |= (u8)((encode & 0xFF00) >> 8);
+       /* Bits b16-b23 go into the next IFH byte */
+       if (encode & 0xFF0000)
+               ifh_hdr[byte - 2] |= (u8)((encode & 0xFF0000) >> 16);
+       /* Bits b24-b31 go into the next IFH byte */
+       if (encode & 0xFF000000)
+               ifh_hdr[byte - 3] |= (u8)((encode & 0xFF000000) >> 24);
+       /* Bits b32-b39 go into the next IFH byte */
+       if (encode & 0xFF00000000)
+               ifh_hdr[byte - 4] |= (u8)((encode & 0xFF00000000) >> 32);
+       /* Bits b40-b47 go into the next IFH byte */
+       if (encode & 0xFF0000000000)
+               ifh_hdr[byte - 5] |= (u8)((encode & 0xFF0000000000) >> 40);
+}
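The encoder above scatters a value into a 36-byte header stored most-significant byte first: byte 35 - pos/8 takes the low bits, and higher bits spill toward lower byte indexes. A compressed userspace sketch of the same walk (illustrative only; assumes the 36-byte IFH and width <= 40, like the driver):

#include <stdint.h>
#include <stdio.h>

static void encode(uint8_t *ifh, uint64_t value, uint32_t pos, uint32_t width)
{
        uint32_t byte = 35 - pos / 8;
        uint32_t bit = pos % 8;
        uint64_t encode = (((1ull << width) - 1) << bit) & (value << bit);
        uint32_t i;

        /* Up to 6 bytes, LSB chunk first, walking towards byte 0 */
        for (i = 0; i <= 5 && (encode >> (8 * i)); i++)
                ifh[byte - i] |= (uint8_t)(encode >> (8 * i));
}

int main(void)
{
        uint8_t ifh[36] = { 0 };
        int i;

        encode(ifh, 16, 37, 5);         /* MISC.PIPELINE_PT, as above */
        for (i = 0; i < 36; i++)
                printf("%02x%c", ifh[i], i % 12 == 11 ? '\n' : ' ');
        return 0;
}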
+
+static void sparx5_set_port_ifh(void *ifh_hdr, u16 portno)
+{
+       /* VSTAX.RSV = 1. MSBit must be 1 */
+       ifh_encode_bitfield(ifh_hdr, 1, VSTAX + 79,  1);
+       /* VSTAX.INGR_DROP_MODE = Enable. Avoid head-of-line blocking */
+       ifh_encode_bitfield(ifh_hdr, 1, VSTAX + 55,  1);
+       /* MISC.CPU_MASK/DPORT = Destination port */
+       ifh_encode_bitfield(ifh_hdr, portno,   29, 8);
+       /* MISC.PIPELINE_PT */
+       ifh_encode_bitfield(ifh_hdr, 16,       37, 5);
+       /* MISC.PIPELINE_ACT */
+       ifh_encode_bitfield(ifh_hdr, 1,        42, 3);
+       /* FWD.SRC_PORT = CPU */
+       ifh_encode_bitfield(ifh_hdr, SPX5_PORT_CPU, 46, 7);
+       /* FWD.SFLOW_ID (disable SFlow sampling) */
+       ifh_encode_bitfield(ifh_hdr, 124,      57, 7);
+       /* FWD.UPDATE_FCS = Enable. Enforce update of FCS. */
+       ifh_encode_bitfield(ifh_hdr, 1,        67, 1);
+}
+
+static int sparx5_port_open(struct net_device *ndev)
+{
+       struct sparx5_port *port = netdev_priv(ndev);
+       int err = 0;
+
+       sparx5_port_enable(port, true);
+       err = phylink_of_phy_connect(port->phylink, port->of_node, 0);
+       if (err) {
+               netdev_err(ndev, "Could not attach to PHY\n");
+               return err;
+       }
+
+       phylink_start(port->phylink);
+
+       if (!ndev->phydev) {
+               /* power up serdes */
+               port->conf.power_down = false;
+               if (port->conf.serdes_reset)
+                       err = sparx5_serdes_set(port->sparx5, port, &port->conf);
+               else
+                       err = phy_power_on(port->serdes);
+               if (err)
+                       netdev_err(ndev, "%s failed\n", __func__);
+       }
+
+       return err;
+}
+
+static int sparx5_port_stop(struct net_device *ndev)
+{
+       struct sparx5_port *port = netdev_priv(ndev);
+       int err = 0;
+
+       sparx5_port_enable(port, false);
+       phylink_stop(port->phylink);
+       phylink_disconnect_phy(port->phylink);
+
+       if (!ndev->phydev) {
+               /* power down serdes */
+               port->conf.power_down = true;
+               if (port->conf.serdes_reset)
+                       err = sparx5_serdes_set(port->sparx5, port, &port->conf);
+               else
+                       err = phy_power_off(port->serdes);
+               if (err)
+                       netdev_err(ndev, "%s failed\n", __func__);
+       }
+       return err;
+}
+
+static void sparx5_set_rx_mode(struct net_device *dev)
+{
+       struct sparx5_port *port = netdev_priv(dev);
+       struct sparx5 *sparx5 = port->sparx5;
+
+       if (!test_bit(port->portno, sparx5->bridge_mask))
+               __dev_mc_sync(dev, sparx5_mc_sync, sparx5_mc_unsync);
+}
+
+static int sparx5_port_get_phys_port_name(struct net_device *dev,
+                                         char *buf, size_t len)
+{
+       struct sparx5_port *port = netdev_priv(dev);
+       int ret;
+
+       ret = snprintf(buf, len, "p%d", port->portno);
+       if (ret >= len)
+               return -EINVAL;
+
+       return 0;
+}
+
+static int sparx5_set_mac_address(struct net_device *dev, void *p)
+{
+       struct sparx5_port *port = netdev_priv(dev);
+       struct sparx5 *sparx5 = port->sparx5;
+       const struct sockaddr *addr = p;
+
+       if (!is_valid_ether_addr(addr->sa_data))
+               return -EADDRNOTAVAIL;
+
+       /* Remove current */
+       sparx5_mact_forget(sparx5, dev->dev_addr,  port->pvid);
+
+       /* Add new */
+       sparx5_mact_learn(sparx5, PGID_CPU, addr->sa_data, port->pvid);
+
+       /* Record the address */
+       ether_addr_copy(dev->dev_addr, addr->sa_data);
+
+       return 0;
+}
+
+static int sparx5_get_port_parent_id(struct net_device *dev,
+                                    struct netdev_phys_item_id *ppid)
+{
+       struct sparx5_port *sparx5_port = netdev_priv(dev);
+       struct sparx5 *sparx5 = sparx5_port->sparx5;
+
+       ppid->id_len = sizeof(sparx5->base_mac);
+       memcpy(&ppid->id, &sparx5->base_mac, ppid->id_len);
+
+       return 0;
+}
+
+static const struct net_device_ops sparx5_port_netdev_ops = {
+       .ndo_open               = sparx5_port_open,
+       .ndo_stop               = sparx5_port_stop,
+       .ndo_start_xmit         = sparx5_port_xmit_impl,
+       .ndo_set_rx_mode        = sparx5_set_rx_mode,
+       .ndo_get_phys_port_name = sparx5_port_get_phys_port_name,
+       .ndo_set_mac_address    = sparx5_set_mac_address,
+       .ndo_validate_addr      = eth_validate_addr,
+       .ndo_get_stats64        = sparx5_get_stats64,
+       .ndo_get_port_parent_id = sparx5_get_port_parent_id,
+};
+
+bool sparx5_netdevice_check(const struct net_device *dev)
+{
+       return dev && (dev->netdev_ops == &sparx5_port_netdev_ops);
+}
+
+struct net_device *sparx5_create_netdev(struct sparx5 *sparx5, u32 portno)
+{
+       struct sparx5_port *spx5_port;
+       struct net_device *ndev;
+       u64 val;
+
+       ndev = devm_alloc_etherdev(sparx5->dev, sizeof(struct sparx5_port));
+       if (!ndev)
+               return ERR_PTR(-ENOMEM);
+
+       SET_NETDEV_DEV(ndev, sparx5->dev);
+       spx5_port = netdev_priv(ndev);
+       spx5_port->ndev = ndev;
+       spx5_port->sparx5 = sparx5;
+       spx5_port->portno = portno;
+       sparx5_set_port_ifh(spx5_port->ifh, portno);
+
+       ndev->netdev_ops = &sparx5_port_netdev_ops;
+       ndev->ethtool_ops = &sparx5_ethtool_ops;
+
+       val = ether_addr_to_u64(sparx5->base_mac) + portno + 1;
+       u64_to_ether_addr(val, ndev->dev_addr);
+
+       return ndev;
+}
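Each port derives its MAC address from the switch base MAC plus portno + 1. A userspace sketch of that derivation, with simplified stand-ins for ether_addr_to_u64()/u64_to_ether_addr() from <linux/etherdevice.h> (the base address shown is an arbitrary example):

#include <stdint.h>
#include <stdio.h>

static uint64_t ether_addr_to_u64(const uint8_t *a)
{
        uint64_t v = 0;
        int i;

        for (i = 0; i < 6; i++)
                v = (v << 8) | a[i];
        return v;
}

static void u64_to_ether_addr(uint64_t v, uint8_t *a)
{
        int i;

        for (i = 5; i >= 0; i--, v >>= 8)
                a[i] = v & 0xff;
}

int main(void)
{
        const uint8_t base[6] = { 0x00, 0x01, 0xc1, 0x00, 0x00, 0x00 };
        uint8_t addr[6];
        uint32_t portno = 7;

        u64_to_ether_addr(ether_addr_to_u64(base) + portno + 1, addr);
        printf("port %u: %02x:%02x:%02x:%02x:%02x:%02x\n", portno,
               addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
        return 0;
}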
+
+int sparx5_register_netdevs(struct sparx5 *sparx5)
+{
+       int portno;
+       int err;
+
+       for (portno = 0; portno < SPX5_PORTS; portno++)
+               if (sparx5->ports[portno]) {
+                       err = register_netdev(sparx5->ports[portno]->ndev);
+                       if (err) {
+                               dev_err(sparx5->dev,
+                                       "port: %02u: netdev registration failed\n",
+                                       portno);
+                               return err;
+                       }
+                       sparx5_port_inj_timer_setup(sparx5->ports[portno]);
+               }
+       return 0;
+}
+
+void sparx5_destroy_netdevs(struct sparx5 *sparx5)
+{
+       struct sparx5_port *port;
+       int portno;
+
+       for (portno = 0; portno < SPX5_PORTS; portno++) {
+               port = sparx5->ports[portno];
+               if (port && port->phylink) {
+                       /* Disconnect the phy */
+                       rtnl_lock();
+                       sparx5_port_stop(port->ndev);
+                       phylink_disconnect_phy(port->phylink);
+                       rtnl_unlock();
+                       phylink_destroy(port->phylink);
+                       port->phylink = NULL;
+               }
+       }
+}
+
+void sparx5_unregister_netdevs(struct sparx5 *sparx5)
+{
+       int portno;
+
+       for (portno = 0; portno < SPX5_PORTS; portno++)
+               if (sparx5->ports[portno])
+                       unregister_netdev(sparx5->ports[portno]->ndev);
+}
+
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
new file mode 100644 (file)
index 0000000..09ca7a3
--- /dev/null
@@ -0,0 +1,320 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip Sparx5 Switch driver
+ *
+ * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#include "sparx5_main_regs.h"
+#include "sparx5_main.h"
+
+#define XTR_EOF_0     ntohl((__force __be32)0x80000000u)
+#define XTR_EOF_1     ntohl((__force __be32)0x80000001u)
+#define XTR_EOF_2     ntohl((__force __be32)0x80000002u)
+#define XTR_EOF_3     ntohl((__force __be32)0x80000003u)
+#define XTR_PRUNED    ntohl((__force __be32)0x80000004u)
+#define XTR_ABORT     ntohl((__force __be32)0x80000005u)
+#define XTR_ESCAPE    ntohl((__force __be32)0x80000006u)
+#define XTR_NOT_READY ntohl((__force __be32)0x80000007u)
+
+#define XTR_VALID_BYTES(x)      (4 - ((x) & 3))
+
+#define INJ_TIMEOUT_NS 50000
+
+struct frame_info {
+       int src_port;
+};
+
+static void sparx5_xtr_flush(struct sparx5 *sparx5, u8 grp)
+{
+       /* Start flush */
+       spx5_wr(QS_XTR_FLUSH_FLUSH_SET(BIT(grp)), sparx5, QS_XTR_FLUSH);
+
+       /* Allow the FIFO to drain */
+       mdelay(1);
+
+       /* All Queues normal */
+       spx5_wr(0, sparx5, QS_XTR_FLUSH);
+}
+
+static void sparx5_ifh_parse(u32 *ifh, struct frame_info *info)
+{
+       u8 *xtr_hdr = (u8 *)ifh;
+
+       /* FWD is bit 45-72 (28 bits), but we only read the 27 LSBs for now */
+       u32 fwd =
+               ((u32)xtr_hdr[27] << 24) |
+               ((u32)xtr_hdr[28] << 16) |
+               ((u32)xtr_hdr[29] <<  8) |
+               ((u32)xtr_hdr[30] <<  0);
+       fwd = (fwd >> 5);
+       info->src_port = FIELD_GET(GENMASK(7, 1), fwd);
+}
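A self-contained sketch of the same FWD decode: the four header bytes are assembled big-endian, the >> 5 shift aligns FWD bit 0, and the source port is bits 7:1 of the result (the test header below is hypothetical):

#include <stdint.h>
#include <stdio.h>

static int ifh_src_port(const uint8_t *xtr_hdr)
{
        uint32_t fwd = ((uint32_t)xtr_hdr[27] << 24) |
                       ((uint32_t)xtr_hdr[28] << 16) |
                       ((uint32_t)xtr_hdr[29] << 8) |
                       ((uint32_t)xtr_hdr[30] << 0);

        fwd >>= 5;                      /* align FWD bit 0 */
        return (fwd >> 1) & 0x7f;       /* FIELD_GET(GENMASK(7, 1), fwd) */
}

int main(void)
{
        uint8_t hdr[36] = { 0 };
        /* Hypothetical header with src_port = 11: put 11 in bits 7:1
         * of the shifted word, i.e. bits 12:6 of the raw 32-bit load.
         */
        uint32_t raw = (11u << 1) << 5;

        hdr[27] = raw >> 24;
        hdr[28] = raw >> 16;
        hdr[29] = raw >> 8;
        hdr[30] = raw;
        printf("src_port=%d\n", ifh_src_port(hdr));
        return 0;
}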
+
+static void sparx5_xtr_grp(struct sparx5 *sparx5, u8 grp, bool byte_swap)
+{
+       bool eof_flag = false, pruned_flag = false, abort_flag = false;
+       struct net_device *netdev;
+       struct sparx5_port *port;
+       struct frame_info fi;
+       int i, byte_cnt = 0;
+       struct sk_buff *skb;
+       u32 ifh[IFH_LEN];
+       u32 *rxbuf;
+
+       /* Get IFH */
+       for (i = 0; i < IFH_LEN; i++)
+               ifh[i] = spx5_rd(sparx5, QS_XTR_RD(grp));
+
+       /* Decode IFH (what's needed) */
+       sparx5_ifh_parse(ifh, &fi);
+
+       /* Map to port netdev */
+       port = fi.src_port < SPX5_PORTS ?
+               sparx5->ports[fi.src_port] : NULL;
+       if (!port || !port->ndev) {
+               dev_err(sparx5->dev, "Data on inactive port %d\n", fi.src_port);
+               sparx5_xtr_flush(sparx5, grp);
+               return;
+       }
+
+       /* Have netdev, get skb */
+       netdev = port->ndev;
+       skb = netdev_alloc_skb(netdev, netdev->mtu + ETH_HLEN);
+       if (!skb) {
+               sparx5_xtr_flush(sparx5, grp);
+               dev_err(sparx5->dev, "No skb allocated\n");
+               netdev->stats.rx_dropped++;
+               return;
+       }
+       rxbuf = (u32 *)skb->data;
+
+       /* Now, pull frame data */
+       while (!eof_flag) {
+               u32 val = spx5_rd(sparx5, QS_XTR_RD(grp));
+               u32 cmp = val;
+
+               if (byte_swap)
+                       cmp = ntohl((__force __be32)val);
+
+               switch (cmp) {
+               case XTR_NOT_READY:
+                       break;
+               case XTR_ABORT:
+                       /* No accompanying data */
+                       abort_flag = true;
+                       eof_flag = true;
+                       break;
+               case XTR_EOF_0:
+               case XTR_EOF_1:
+               case XTR_EOF_2:
+               case XTR_EOF_3:
+                       /* This assumes STATUS_WORD_POS == 1, i.e. the
+                        * status word follows the last data word
+                        */
+                       byte_cnt -= (4 - XTR_VALID_BYTES(val));
+                       eof_flag = true;
+                       break;
+               case XTR_PRUNED:
+                       /* But get the last 4 bytes as well */
+                       eof_flag = true;
+                       pruned_flag = true;
+                       fallthrough;
+               case XTR_ESCAPE:
+                       *rxbuf = spx5_rd(sparx5, QS_XTR_RD(grp));
+                       byte_cnt += 4;
+                       rxbuf++;
+                       break;
+               default:
+                       *rxbuf = val;
+                       byte_cnt += 4;
+                       rxbuf++;
+               }
+       }
+
+       if (abort_flag || pruned_flag || !eof_flag) {
+               netdev_err(netdev, "Discarded frame: abort:%d pruned:%d eof:%d\n",
+                          abort_flag, pruned_flag, eof_flag);
+               kfree_skb(skb);
+               netdev->stats.rx_dropped++;
+               return;
+       }
+
+       /* Everything we see on an interface that is in the HW bridge
+        * has already been forwarded
+        */
+       if (test_bit(port->portno, sparx5->bridge_mask))
+               skb->offload_fwd_mark = 1;
+
+       /* Finish up skb */
+       skb_put(skb, byte_cnt - ETH_FCS_LEN);
+       eth_skb_pad(skb);
+       skb->protocol = eth_type_trans(skb, netdev);
+       netif_rx(skb);
+       netdev->stats.rx_bytes += skb->len;
+       netdev->stats.rx_packets++;
+}
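On EOF, the two low bits of the status word give the number of unused bytes in the last data word, which XTR_VALID_BYTES() and the subtraction above turn into the true byte count. A standalone check of that arithmetic (the status values are used directly as their low bits for brevity):

#include <stdint.h>
#include <stdio.h>

#define XTR_VALID_BYTES(x)      (4 - ((x) & 3))

int main(void)
{
        uint32_t status;

        for (status = 0; status < 4; status++) {
                int byte_cnt = 64;      /* bytes pulled before EOF */

                byte_cnt -= 4 - XTR_VALID_BYTES(status);
                printf("XTR_EOF_%u: valid=%d total=%d\n",
                       status, XTR_VALID_BYTES(status), byte_cnt);
        }
        return 0;
}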
+
+static int sparx5_inject(struct sparx5 *sparx5,
+                        u32 *ifh,
+                        struct sk_buff *skb,
+                        struct net_device *ndev)
+{
+       int grp = INJ_QUEUE;
+       u32 val, w, count;
+       u8 *buf;
+
+       val = spx5_rd(sparx5, QS_INJ_STATUS);
+       if (!(QS_INJ_STATUS_FIFO_RDY_GET(val) & BIT(grp))) {
+               pr_err_ratelimited("Injection: Queue not ready: 0x%lx\n",
+                                  QS_INJ_STATUS_FIFO_RDY_GET(val));
+               return -EBUSY;
+       }
+
+       /* Indicate SOF */
+       spx5_wr(QS_INJ_CTRL_SOF_SET(1) |
+               QS_INJ_CTRL_GAP_SIZE_SET(1),
+               sparx5, QS_INJ_CTRL(grp));
+
+       /* Write the IFH to the chip. */
+       for (w = 0; w < IFH_LEN; w++)
+               spx5_wr(ifh[w], sparx5, QS_INJ_WR(grp));
+
+       /* Write words, round up */
+       count = DIV_ROUND_UP(skb->len, 4);
+       buf = skb->data;
+       for (w = 0; w < count; w++, buf += 4) {
+               val = get_unaligned((const u32 *)buf);
+               spx5_wr(val, sparx5, QS_INJ_WR(grp));
+       }
+
+       /* Add padding */
+       while (w < (60 / 4)) {
+               spx5_wr(0, sparx5, QS_INJ_WR(grp));
+               w++;
+       }
+
+       /* Indicate EOF and valid bytes in last word */
+       spx5_wr(QS_INJ_CTRL_GAP_SIZE_SET(1) |
+               QS_INJ_CTRL_VLD_BYTES_SET(skb->len < 60 ? 0 : skb->len % 4) |
+               QS_INJ_CTRL_EOF_SET(1),
+               sparx5, QS_INJ_CTRL(grp));
+
+       /* Add dummy CRC */
+       spx5_wr(0, sparx5, QS_INJ_WR(grp));
+       w++;
+
+       val = spx5_rd(sparx5, QS_INJ_STATUS);
+       if (QS_INJ_STATUS_WMARK_REACHED_GET(val) & BIT(grp)) {
+               struct sparx5_port *port = netdev_priv(ndev);
+
+               pr_err_ratelimited("Injection: Watermark reached: 0x%lx\n",
+                                  QS_INJ_STATUS_WMARK_REACHED_GET(val));
+               netif_stop_queue(ndev);
+               hrtimer_start(&port->inj_timer, INJ_TIMEOUT_NS,
+                             HRTIMER_MODE_REL);
+       }
+
+       return NETDEV_TX_OK;
+}
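sparx5_inject() writes whole 32-bit words, pads short frames up to the 60-byte minimum and flags the valid bytes of the last word via VLD_BYTES (0 meaning all four). A standalone sketch of that word accounting (the lengths are arbitrary examples):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        const int lens[] = { 42, 60, 61, 1514 };
        unsigned int i;

        for (i = 0; i < sizeof(lens) / sizeof(lens[0]); i++) {
                int len = lens[i];
                int w = DIV_ROUND_UP(len, 4);   /* payload words */
                int pad = 0;

                while (w + pad < 60 / 4)        /* pad to min frame */
                        pad++;
                printf("len=%4d words=%3d pad=%d vld_bytes=%d\n",
                       len, w, pad, len < 60 ? 0 : len % 4);
        }
        return 0;
}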
+
+int sparx5_port_xmit_impl(struct sk_buff *skb, struct net_device *dev)
+{
+       struct net_device_stats *stats = &dev->stats;
+       struct sparx5_port *port = netdev_priv(dev);
+       struct sparx5 *sparx5 = port->sparx5;
+       int ret;
+
+       ret = sparx5_inject(sparx5, port->ifh, skb, dev);
+
+       if (ret == NETDEV_TX_OK) {
+               stats->tx_bytes += skb->len;
+               stats->tx_packets++;
+               skb_tx_timestamp(skb);
+               dev_kfree_skb_any(skb);
+       } else {
+               stats->tx_dropped++;
+       }
+       return ret;
+}
+
+static enum hrtimer_restart sparx5_injection_timeout(struct hrtimer *tmr)
+{
+       struct sparx5_port *port = container_of(tmr, struct sparx5_port,
+                                               inj_timer);
+       int grp = INJ_QUEUE;
+       u32 val;
+
+       val = spx5_rd(port->sparx5, QS_INJ_STATUS);
+       if (QS_INJ_STATUS_WMARK_REACHED_GET(val) & BIT(grp)) {
+               pr_err_ratelimited("Injection: Reset watermark count\n");
+               /* Reset Watermark count to restart */
+               spx5_rmw(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR_SET(1),
+                        DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR,
+                        port->sparx5,
+                        DSM_DEV_TX_STOP_WM_CFG(port->portno));
+       }
+       netif_wake_queue(port->ndev);
+       return HRTIMER_NORESTART;
+}
+
+int sparx5_manual_injection_mode(struct sparx5 *sparx5)
+{
+       const int byte_swap = 1;
+       int portno;
+
+       /* Change mode to manual extraction and injection */
+       spx5_wr(QS_XTR_GRP_CFG_MODE_SET(1) |
+               QS_XTR_GRP_CFG_STATUS_WORD_POS_SET(1) |
+               QS_XTR_GRP_CFG_BYTE_SWAP_SET(byte_swap),
+               sparx5, QS_XTR_GRP_CFG(XTR_QUEUE));
+       spx5_wr(QS_INJ_GRP_CFG_MODE_SET(1) |
+               QS_INJ_GRP_CFG_BYTE_SWAP_SET(byte_swap),
+               sparx5, QS_INJ_GRP_CFG(INJ_QUEUE));
+
+       /* CPU ports capture setup */
+       for (portno = SPX5_PORT_CPU_0; portno <= SPX5_PORT_CPU_1; portno++) {
+               /* ASM CPU port: No preamble, IFH, enable padding */
+               spx5_wr(ASM_PORT_CFG_PAD_ENA_SET(1) |
+                       ASM_PORT_CFG_NO_PREAMBLE_ENA_SET(1) |
+                       ASM_PORT_CFG_INJ_FORMAT_CFG_SET(1), /* 1 = IFH */
+                       sparx5, ASM_PORT_CFG(portno));
+
+               /* Reset WM cnt to unclog queued frames */
+               spx5_rmw(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR_SET(1),
+                        DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR,
+                        sparx5,
+                        DSM_DEV_TX_STOP_WM_CFG(portno));
+
+               /* Set Disassembler Stop Watermark level */
+               spx5_rmw(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM_SET(0),
+                        DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM,
+                        sparx5,
+                        DSM_DEV_TX_STOP_WM_CFG(portno));
+
+               /* Enable Disassembler buffer underrun watchdog */
+               spx5_rmw(DSM_BUF_CFG_UNDERFLOW_WATCHDOG_DIS_SET(0),
+                        DSM_BUF_CFG_UNDERFLOW_WATCHDOG_DIS,
+                        sparx5,
+                        DSM_BUF_CFG(portno));
+       }
+       return 0;
+}
+
+irqreturn_t sparx5_xtr_handler(int irq, void *_sparx5)
+{
+       struct sparx5 *s5 = _sparx5;
+       int poll = 64;
+
+       /* Check data in queue */
+       while (spx5_rd(s5, QS_XTR_DATA_PRESENT) & BIT(XTR_QUEUE) && poll-- > 0)
+               sparx5_xtr_grp(s5, XTR_QUEUE, false);
+
+       return IRQ_HANDLED;
+}
+
+void sparx5_port_inj_timer_setup(struct sparx5_port *port)
+{
+       hrtimer_init(&port->inj_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+       port->inj_timer.function = sparx5_injection_timeout;
+}
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c b/drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c
new file mode 100644 (file)
index 0000000..af70e27
--- /dev/null
@@ -0,0 +1,210 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip Sparx5 Switch driver
+ *
+ * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#include <linux/module.h>
+#include <linux/phylink.h>
+#include <linux/device.h>
+#include <linux/netdevice.h>
+#include <linux/sfp.h>
+
+#include "sparx5_main_regs.h"
+#include "sparx5_main.h"
+#include "sparx5_port.h"
+
+static bool port_conf_has_changed(struct sparx5_port_config *a, struct sparx5_port_config *b)
+{
+       if (a->speed != b->speed ||
+           a->portmode != b->portmode ||
+           a->autoneg != b->autoneg ||
+           a->pause_adv != b->pause_adv ||
+           a->power_down != b->power_down ||
+           a->media != b->media)
+               return true;
+       return false;
+}
+
+static void sparx5_phylink_validate(struct phylink_config *config,
+                                   unsigned long *supported,
+                                   struct phylink_link_state *state)
+{
+       struct sparx5_port *port = netdev_priv(to_net_dev(config->dev));
+       __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+
+       phylink_set(mask, Autoneg);
+       phylink_set_port_modes(mask);
+       phylink_set(mask, Pause);
+       phylink_set(mask, Asym_Pause);
+
+       switch (state->interface) {
+       case PHY_INTERFACE_MODE_5GBASER:
+       case PHY_INTERFACE_MODE_10GBASER:
+       case PHY_INTERFACE_MODE_25GBASER:
+       case PHY_INTERFACE_MODE_NA:
+               if (port->conf.bandwidth == SPEED_5000)
+                       phylink_set(mask, 5000baseT_Full);
+               if (port->conf.bandwidth == SPEED_10000) {
+                       phylink_set(mask, 5000baseT_Full);
+                       phylink_set(mask, 10000baseT_Full);
+                       phylink_set(mask, 10000baseCR_Full);
+                       phylink_set(mask, 10000baseSR_Full);
+                       phylink_set(mask, 10000baseLR_Full);
+                       phylink_set(mask, 10000baseLRM_Full);
+                       phylink_set(mask, 10000baseER_Full);
+               }
+               if (port->conf.bandwidth == SPEED_25000) {
+                       phylink_set(mask, 5000baseT_Full);
+                       phylink_set(mask, 10000baseT_Full);
+                       phylink_set(mask, 10000baseCR_Full);
+                       phylink_set(mask, 10000baseSR_Full);
+                       phylink_set(mask, 10000baseLR_Full);
+                       phylink_set(mask, 10000baseLRM_Full);
+                       phylink_set(mask, 10000baseER_Full);
+                       phylink_set(mask, 25000baseCR_Full);
+                       phylink_set(mask, 25000baseSR_Full);
+               }
+               if (state->interface != PHY_INTERFACE_MODE_NA)
+                       break;
+               fallthrough;
+       case PHY_INTERFACE_MODE_SGMII:
+       case PHY_INTERFACE_MODE_QSGMII:
+               phylink_set(mask, 10baseT_Half);
+               phylink_set(mask, 10baseT_Full);
+               phylink_set(mask, 100baseT_Half);
+               phylink_set(mask, 100baseT_Full);
+               phylink_set(mask, 1000baseT_Full);
+               phylink_set(mask, 1000baseX_Full);
+               if (state->interface != PHY_INTERFACE_MODE_NA)
+                       break;
+               fallthrough;
+       case PHY_INTERFACE_MODE_1000BASEX:
+       case PHY_INTERFACE_MODE_2500BASEX:
+               if (state->interface != PHY_INTERFACE_MODE_2500BASEX) {
+                       phylink_set(mask, 1000baseT_Full);
+                       phylink_set(mask, 1000baseX_Full);
+               }
+               if (state->interface == PHY_INTERFACE_MODE_2500BASEX ||
+                   state->interface == PHY_INTERFACE_MODE_NA) {
+                       phylink_set(mask, 2500baseT_Full);
+                       phylink_set(mask, 2500baseX_Full);
+               }
+               break;
+       default:
+               bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+               return;
+       }
+       bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS);
+       bitmap_and(state->advertising, state->advertising, mask,
+                  __ETHTOOL_LINK_MODE_MASK_NBITS);
+}
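The validate callback narrows phylink's supported/advertising masks to what the MAC can do for the given interface and configured bandwidth. A much-simplified sketch of that narrowing, using a single 64-bit word and made-up mode bits instead of the kernel's multi-word link-mode bitmaps:

#include <stdint.h>
#include <stdio.h>

enum { MODE_10GBASER_BIT = 0, MODE_25GBASER_BIT = 1, MODE_AUTONEG_BIT = 2 };

int main(void)
{
        uint64_t supported = ~0ull;             /* from phylink */
        uint64_t advertising = ~0ull;
        uint64_t mask = 1ull << MODE_AUTONEG_BIT;
        int bandwidth = 10000;                  /* port->conf.bandwidth */

        /* Build the MAC's mask for this configuration */
        if (bandwidth >= 10000)
                mask |= 1ull << MODE_10GBASER_BIT;
        if (bandwidth >= 25000)
                mask |= 1ull << MODE_25GBASER_BIT;

        supported &= mask;                      /* bitmap_and() */
        advertising &= mask;
        printf("supported=0x%llx advertising=0x%llx\n",
               (unsigned long long)supported,
               (unsigned long long)advertising);
        return 0;
}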
+
+static void sparx5_phylink_mac_config(struct phylink_config *config,
+                                     unsigned int mode,
+                                     const struct phylink_link_state *state)
+{
+       /* Currently not used */
+}
+
+static void sparx5_phylink_mac_link_up(struct phylink_config *config,
+                                      struct phy_device *phy,
+                                      unsigned int mode,
+                                      phy_interface_t interface,
+                                      int speed, int duplex,
+                                      bool tx_pause, bool rx_pause)
+{
+       struct sparx5_port *port = netdev_priv(to_net_dev(config->dev));
+       struct sparx5_port_config conf;
+       int err;
+
+       conf = port->conf;
+       conf.duplex = duplex;
+       conf.pause = 0;
+       conf.pause |= tx_pause ? MLO_PAUSE_TX : 0;
+       conf.pause |= rx_pause ? MLO_PAUSE_RX : 0;
+       conf.speed = speed;
+       /* Configure the port to speed/duplex/pause */
+       err = sparx5_port_config(port->sparx5, port, &conf);
+       if (err)
+               netdev_err(port->ndev, "port config failed: %d\n", err);
+}
+
+static void sparx5_phylink_mac_link_down(struct phylink_config *config,
+                                        unsigned int mode,
+                                        phy_interface_t interface)
+{
+       /* Currently not used */
+}
+
+static struct sparx5_port *sparx5_pcs_to_port(struct phylink_pcs *pcs)
+{
+       return container_of(pcs, struct sparx5_port, phylink_pcs);
+}
+
+static void sparx5_pcs_get_state(struct phylink_pcs *pcs,
+                                struct phylink_link_state *state)
+{
+       struct sparx5_port *port = sparx5_pcs_to_port(pcs);
+       struct sparx5_port_status status;
+
+       sparx5_get_port_status(port->sparx5, port, &status);
+       state->link = status.link && !status.link_down;
+       state->an_complete = status.an_complete;
+       state->speed = status.speed;
+       state->duplex = status.duplex;
+       state->pause = status.pause;
+}
+
+static int sparx5_pcs_config(struct phylink_pcs *pcs,
+                            unsigned int mode,
+                            phy_interface_t interface,
+                            const unsigned long *advertising,
+                            bool permit_pause_to_mac)
+{
+       struct sparx5_port *port = sparx5_pcs_to_port(pcs);
+       struct sparx5_port_config conf;
+       int ret = 0;
+
+       conf = port->conf;
+       conf.power_down = false;
+       conf.portmode = interface;
+       conf.inband = phylink_autoneg_inband(mode);
+       conf.autoneg = phylink_test(advertising, Autoneg);
+       conf.pause_adv = 0;
+       if (phylink_test(advertising, Pause))
+               conf.pause_adv |= ADVERTISE_1000XPAUSE;
+       if (phylink_test(advertising, Asym_Pause))
+               conf.pause_adv |= ADVERTISE_1000XPSE_ASYM;
+       if (sparx5_is_baser(interface)) {
+               if (phylink_test(advertising, FIBRE))
+                       conf.media = PHY_MEDIA_SR;
+               else
+                       conf.media = PHY_MEDIA_DAC;
+       }
+       if (!port_conf_has_changed(&port->conf, &conf))
+               return ret;
+       /* Enable the PCS matching this interface type */
+       ret = sparx5_port_pcs_set(port->sparx5, port, &conf);
+       if (ret)
+               netdev_err(port->ndev, "port PCS config failed: %d\n", ret);
+       return ret;
+}
+
+static void sparx5_pcs_aneg_restart(struct phylink_pcs *pcs)
+{
+       /* Currently not used */
+}
+
+const struct phylink_pcs_ops sparx5_phylink_pcs_ops = {
+       .pcs_get_state = sparx5_pcs_get_state,
+       .pcs_config = sparx5_pcs_config,
+       .pcs_an_restart = sparx5_pcs_aneg_restart,
+};
+
+const struct phylink_mac_ops sparx5_phylink_mac_ops = {
+       .validate = sparx5_phylink_validate,
+       .mac_config = sparx5_phylink_mac_config,
+       .mac_link_down = sparx5_phylink_mac_link_down,
+       .mac_link_up = sparx5_phylink_mac_link_up,
+};
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_port.c b/drivers/net/ethernet/microchip/sparx5/sparx5_port.c
new file mode 100644 (file)
index 0000000..d2e3250
--- /dev/null
@@ -0,0 +1,1146 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip Sparx5 Switch driver
+ *
+ * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#include <linux/module.h>
+#include <linux/phy/phy.h>
+
+#include "sparx5_main_regs.h"
+#include "sparx5_main.h"
+#include "sparx5_port.h"
+
+#define SPX5_ETYPE_TAG_C     0x8100
+#define SPX5_ETYPE_TAG_S     0x88a8
+
+#define SPX5_WAIT_US         1000
+#define SPX5_WAIT_MAX_US     2000
+
+enum port_error {
+       SPX5_PERR_SPEED,
+       SPX5_PERR_IFTYPE,
+};
+
+#define PAUSE_DISCARD        0xC
+#define ETH_MAXLEN           (ETH_DATA_LEN + ETH_HLEN + ETH_FCS_LEN)
+
+static void decode_sgmii_word(u16 lp_abil, struct sparx5_port_status *status)
+{
+       status->an_complete = true;
+       if (!(lp_abil & LPA_SGMII_LINK)) {
+               status->link = false;
+               return;
+       }
+
+       switch (lp_abil & LPA_SGMII_SPD_MASK) {
+       case LPA_SGMII_10:
+               status->speed = SPEED_10;
+               break;
+       case LPA_SGMII_100:
+               status->speed = SPEED_100;
+               break;
+       case LPA_SGMII_1000:
+               status->speed = SPEED_1000;
+               break;
+       default:
+               status->link = false;
+               return;
+       }
+       if (lp_abil & LPA_SGMII_FULL_DUPLEX)
+               status->duplex = DUPLEX_FULL;
+       else
+               status->duplex = DUPLEX_HALF;
+}
+
+static void decode_cl37_word(u16 lp_abil, u16 ld_abil, struct sparx5_port_status *status)
+{
+       status->link = !(lp_abil & ADVERTISE_RFAULT) && status->link;
+       status->an_complete = true;
+       status->duplex = (ADVERTISE_1000XFULL & lp_abil) ?
+               DUPLEX_FULL : DUPLEX_UNKNOWN; /* 1G HDX not supported */
+
+       if ((ld_abil & ADVERTISE_1000XPAUSE) &&
+           (lp_abil & ADVERTISE_1000XPAUSE)) {
+               status->pause = MLO_PAUSE_RX | MLO_PAUSE_TX;
+       } else if ((ld_abil & ADVERTISE_1000XPSE_ASYM) &&
+                  (lp_abil & ADVERTISE_1000XPSE_ASYM)) {
+               status->pause |= (lp_abil & ADVERTISE_1000XPAUSE) ?
+                       MLO_PAUSE_TX : 0;
+               status->pause |= (ld_abil & ADVERTISE_1000XPAUSE) ?
+                       MLO_PAUSE_RX : 0;
+       } else {
+               status->pause = MLO_PAUSE_NONE;
+       }
+}
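A standalone test of the clause-37 pause resolution above: symmetric pause wins when both sides advertise PAUSE; otherwise asymmetric pause resolves per direction. The constants mirror ADVERTISE_1000XPAUSE/ADVERTISE_1000XPSE_ASYM from <linux/mii.h>; local flags stand in for MLO_PAUSE_TX/RX:

#include <stdio.h>

#define ADV_PAUSE 0x0080        /* ADVERTISE_1000XPAUSE */
#define ADV_ASYM  0x0100        /* ADVERTISE_1000XPSE_ASYM */
#define PAUSE_TX  0x1
#define PAUSE_RX  0x2

static int resolve_pause(int ld, int lp)
{
        if ((ld & ADV_PAUSE) && (lp & ADV_PAUSE))
                return PAUSE_TX | PAUSE_RX;
        if ((ld & ADV_ASYM) && (lp & ADV_ASYM))
                return ((lp & ADV_PAUSE) ? PAUSE_TX : 0) |
                       ((ld & ADV_PAUSE) ? PAUSE_RX : 0);
        return 0;
}

int main(void)
{
        printf("sym/sym   -> %d\n", resolve_pause(ADV_PAUSE, ADV_PAUSE));
        printf("asym pair -> %d\n",
               resolve_pause(ADV_ASYM, ADV_PAUSE | ADV_ASYM));
        return 0;
}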
+
+static int sparx5_get_dev2g5_status(struct sparx5 *sparx5,
+                                   struct sparx5_port *port,
+                                   struct sparx5_port_status *status)
+{
+       u32 portno = port->portno;
+       u16 lp_adv, ld_adv;
+       u32 value;
+
+       /* Get PCS Link down sticky */
+       value = spx5_rd(sparx5, DEV2G5_PCS1G_STICKY(portno));
+       status->link_down = DEV2G5_PCS1G_STICKY_LINK_DOWN_STICKY_GET(value);
+       if (status->link_down)  /* Clear the sticky */
+               spx5_wr(value, sparx5, DEV2G5_PCS1G_STICKY(portno));
+
+       /* Get both current Link and Sync status */
+       value = spx5_rd(sparx5, DEV2G5_PCS1G_LINK_STATUS(portno));
+       status->link = DEV2G5_PCS1G_LINK_STATUS_LINK_STATUS_GET(value) &&
+                      DEV2G5_PCS1G_LINK_STATUS_SYNC_STATUS_GET(value);
+
+       if (port->conf.portmode == PHY_INTERFACE_MODE_1000BASEX)
+               status->speed = SPEED_1000;
+       else if (port->conf.portmode == PHY_INTERFACE_MODE_2500BASEX)
+               status->speed = SPEED_2500;
+
+       status->duplex = DUPLEX_FULL;
+
+       /* Get PCS ANEG status register */
+       value = spx5_rd(sparx5, DEV2G5_PCS1G_ANEG_STATUS(portno));
+
+       /* Aneg complete provides more information */
+       if (DEV2G5_PCS1G_ANEG_STATUS_ANEG_COMPLETE_GET(value)) {
+               lp_adv = DEV2G5_PCS1G_ANEG_STATUS_LP_ADV_ABILITY_GET(value);
+               if (port->conf.portmode == PHY_INTERFACE_MODE_SGMII) {
+                       decode_sgmii_word(lp_adv, status);
+               } else {
+                       value = spx5_rd(sparx5, DEV2G5_PCS1G_ANEG_CFG(portno));
+                       ld_adv = DEV2G5_PCS1G_ANEG_CFG_ADV_ABILITY_GET(value);
+                       decode_cl37_word(lp_adv, ld_adv, status);
+               }
+       }
+       return 0;
+}
+
+static int sparx5_get_sfi_status(struct sparx5 *sparx5,
+                                struct sparx5_port *port,
+                                struct sparx5_port_status *status)
+{
+       bool high_speed_dev = sparx5_is_baser(port->conf.portmode);
+       u32 portno = port->portno;
+       u32 value, dev, tinst;
+       void __iomem *inst;
+
+       if (!high_speed_dev) {
+               netdev_err(port->ndev, "error: low speed and SFI mode\n");
+               return -EINVAL;
+       }
+
+       dev = sparx5_to_high_dev(portno);
+       tinst = sparx5_port_dev_index(portno);
+       inst = spx5_inst_get(sparx5, dev, tinst);
+
+       value = spx5_inst_rd(inst, DEV10G_MAC_TX_MONITOR_STICKY(0));
+       if (value != DEV10G_MAC_TX_MONITOR_STICKY_IDLE_STATE_STICKY) {
+               /* The link is or has been down. Clear the sticky bit */
+               status->link_down = 1;
+               spx5_inst_wr(0xffffffff, inst, DEV10G_MAC_TX_MONITOR_STICKY(0));
+               value = spx5_inst_rd(inst, DEV10G_MAC_TX_MONITOR_STICKY(0));
+       }
+       status->link = (value == DEV10G_MAC_TX_MONITOR_STICKY_IDLE_STATE_STICKY);
+       status->duplex = DUPLEX_FULL;
+       if (port->conf.portmode == PHY_INTERFACE_MODE_5GBASER)
+               status->speed = SPEED_5000;
+       else if (port->conf.portmode == PHY_INTERFACE_MODE_10GBASER)
+               status->speed = SPEED_10000;
+       else
+               status->speed = SPEED_25000;
+
+       return 0;
+}
+
+/* Get link status of 1000Base-X/in-band and SFI ports */
+int sparx5_get_port_status(struct sparx5 *sparx5,
+                          struct sparx5_port *port,
+                          struct sparx5_port_status *status)
+{
+       memset(status, 0, sizeof(*status));
+       status->speed = port->conf.speed;
+       if (port->conf.power_down) {
+               status->link = false;
+               return 0;
+       }
+       switch (port->conf.portmode) {
+       case PHY_INTERFACE_MODE_SGMII:
+       case PHY_INTERFACE_MODE_QSGMII:
+       case PHY_INTERFACE_MODE_1000BASEX:
+       case PHY_INTERFACE_MODE_2500BASEX:
+               return sparx5_get_dev2g5_status(sparx5, port, status);
+       case PHY_INTERFACE_MODE_5GBASER:
+       case PHY_INTERFACE_MODE_10GBASER:
+       case PHY_INTERFACE_MODE_25GBASER:
+               return sparx5_get_sfi_status(sparx5, port, status);
+       case PHY_INTERFACE_MODE_NA:
+               return 0;
+       default:
+               netdev_err(port->ndev, "Status not supported");
+               return -ENODEV;
+       }
+       return 0;
+}
+
+static int sparx5_port_error(struct sparx5_port *port,
+                            struct sparx5_port_config *conf,
+                            enum port_error errtype)
+{
+       switch (errtype) {
+       case SPX5_PERR_SPEED:
+               netdev_err(port->ndev,
+                          "Interface does not support speed: %u: for %s\n",
+                          conf->speed, phy_modes(conf->portmode));
+               break;
+       case SPX5_PERR_IFTYPE:
+               netdev_err(port->ndev,
+                          "Switch port does not support interface type: %s\n",
+                          phy_modes(conf->portmode));
+               break;
+       default:
+               netdev_err(port->ndev,
+                          "Interface configuration error\n");
+       }
+
+       return -EINVAL;
+}
+
+static int sparx5_port_verify_speed(struct sparx5 *sparx5,
+                                   struct sparx5_port *port,
+                                   struct sparx5_port_config *conf)
+{
+       if ((sparx5_port_is_2g5(port->portno) &&
+            conf->speed > SPEED_2500) ||
+           (sparx5_port_is_5g(port->portno)  &&
+            conf->speed > SPEED_5000) ||
+           (sparx5_port_is_10g(port->portno) &&
+            conf->speed > SPEED_10000))
+               return sparx5_port_error(port, conf, SPX5_PERR_SPEED);
+
+       switch (conf->portmode) {
+       case PHY_INTERFACE_MODE_NA:
+               return -EINVAL;
+       case PHY_INTERFACE_MODE_1000BASEX:
+               if (conf->speed != SPEED_1000)
+                       return sparx5_port_error(port, conf, SPX5_PERR_SPEED);
+               if (sparx5_port_is_2g5(port->portno))
+                       return sparx5_port_error(port, conf, SPX5_PERR_IFTYPE);
+               break;
+       case PHY_INTERFACE_MODE_2500BASEX:
+               if (conf->speed != SPEED_2500 ||
+                   sparx5_port_is_2g5(port->portno))
+                       return sparx5_port_error(port, conf, SPX5_PERR_SPEED);
+               break;
+       case PHY_INTERFACE_MODE_QSGMII:
+               if (port->portno > 47)
+                       return sparx5_port_error(port, conf, SPX5_PERR_IFTYPE);
+               fallthrough;
+       case PHY_INTERFACE_MODE_SGMII:
+               if (conf->speed != SPEED_1000 &&
+                   conf->speed != SPEED_100 &&
+                   conf->speed != SPEED_10 &&
+                   conf->speed != SPEED_2500)
+                       return sparx5_port_error(port, conf, SPX5_PERR_SPEED);
+               break;
+       case PHY_INTERFACE_MODE_5GBASER:
+       case PHY_INTERFACE_MODE_10GBASER:
+       case PHY_INTERFACE_MODE_25GBASER:
+               if ((conf->speed != SPEED_5000 &&
+                    conf->speed != SPEED_10000 &&
+                    conf->speed != SPEED_25000))
+                       return sparx5_port_error(port, conf, SPX5_PERR_SPEED);
+               break;
+       default:
+               return sparx5_port_error(port, conf, SPX5_PERR_IFTYPE);
+       }
+       return 0;
+}
+
+static bool sparx5_dev_change(struct sparx5 *sparx5,
+                             struct sparx5_port *port,
+                             struct sparx5_port_config *conf)
+{
+       return sparx5_is_baser(port->conf.portmode) ^
+               sparx5_is_baser(conf->portmode);
+}
+
+static int sparx5_port_flush_poll(struct sparx5 *sparx5, u32 portno)
+{
+       u32  value, resource, prio, delay_cnt = 0;
+       bool poll_src = true;
+       char *mem = "";
+
+       /* Resource == 0: Memory tracked per source (SRC-MEM)
+        * Resource == 1: Frame references tracked per source (SRC-REF)
+        * Resource == 2: Memory tracked per destination (DST-MEM)
+        * Resource == 3: Frame references tracked per destination. (DST-REF)
+        */
+       while (1) {
+               bool empty = true;
+
+               for (resource = 0; resource < (poll_src ? 2 : 1); resource++) {
+                       u32 base;
+
+                       base = (resource == 0 ? 2048 : 0) + SPX5_PRIOS * portno;
+                       for (prio = 0; prio < SPX5_PRIOS; prio++) {
+                               value = spx5_rd(sparx5,
+                                               QRES_RES_STAT(base + prio));
+                               if (value) {
+                                       mem = resource == 0 ?
+                                               "DST-MEM" : "SRC-MEM";
+                                       empty = false;
+                               }
+                       }
+               }
+
+               if (empty)
+                       break;
+
+               if (delay_cnt++ == 2000) {
+                       dev_err(sparx5->dev,
+                               "Flush timeout port %u. %s queue not empty\n",
+                               portno, mem);
+                       return -EINVAL;
+               }
+
+               usleep_range(SPX5_WAIT_US, SPX5_WAIT_MAX_US);
+       }
+       return 0;
+}
+
+static int sparx5_port_disable(struct sparx5 *sparx5, struct sparx5_port *port, bool high_spd_dev)
+{
+       u32 tinst = high_spd_dev ?
+                   sparx5_port_dev_index(port->portno) : port->portno;
+       u32 dev = high_spd_dev ?
+                 sparx5_to_high_dev(port->portno) : TARGET_DEV2G5;
+       void __iomem *devinst = spx5_inst_get(sparx5, dev, tinst);
+       u32 spd = port->conf.speed;
+       u32 spd_prm;
+       int err;
+
+       if (high_spd_dev) {
+               /* 1: Reset the PCS Rx clock domain  */
+               spx5_inst_rmw(DEV10G_DEV_RST_CTRL_PCS_RX_RST,
+                             DEV10G_DEV_RST_CTRL_PCS_RX_RST,
+                             devinst,
+                             DEV10G_DEV_RST_CTRL(0));
+
+               /* 2: Disable MAC frame reception */
+               spx5_inst_rmw(0,
+                             DEV10G_MAC_ENA_CFG_RX_ENA,
+                             devinst,
+                             DEV10G_MAC_ENA_CFG(0));
+       } else {
+               /* 1: Reset the PCS Rx clock domain  */
+               spx5_inst_rmw(DEV2G5_DEV_RST_CTRL_PCS_RX_RST,
+                             DEV2G5_DEV_RST_CTRL_PCS_RX_RST,
+                             devinst,
+                             DEV2G5_DEV_RST_CTRL(0));
+               /* 2: Disable MAC frame reception */
+               spx5_inst_rmw(0,
+                             DEV2G5_MAC_ENA_CFG_RX_ENA,
+                             devinst,
+                             DEV2G5_MAC_ENA_CFG(0));
+       }
+       /* 3: Disable traffic being sent to or from switch port->portno */
+       spx5_rmw(0,
+                QFWD_SWITCH_PORT_MODE_PORT_ENA,
+                sparx5,
+                QFWD_SWITCH_PORT_MODE(port->portno));
+
+       /* 4: Disable dequeuing from the egress queues  */
+       spx5_rmw(HSCH_PORT_MODE_DEQUEUE_DIS,
+                HSCH_PORT_MODE_DEQUEUE_DIS,
+                sparx5,
+                HSCH_PORT_MODE(port->portno));
+
+       /* 5: Disable Flowcontrol */
+       spx5_rmw(QSYS_PAUSE_CFG_PAUSE_STOP_SET(0xFFF - 1),
+                QSYS_PAUSE_CFG_PAUSE_STOP,
+                sparx5,
+                QSYS_PAUSE_CFG(port->portno));
+
+       spd_prm = spd == SPEED_10 ? 1000 : spd == SPEED_100 ? 100 : 10;
+       /* 6: Wait while the last frame is exiting the queues */
+       usleep_range(8 * spd_prm, 10 * spd_prm);
+
+       /* 7: Flush the queues associated with the port->portno */
+       spx5_rmw(HSCH_FLUSH_CTRL_FLUSH_PORT_SET(port->portno) |
+                HSCH_FLUSH_CTRL_FLUSH_DST_SET(1) |
+                HSCH_FLUSH_CTRL_FLUSH_SRC_SET(1) |
+                HSCH_FLUSH_CTRL_FLUSH_ENA_SET(1),
+                HSCH_FLUSH_CTRL_FLUSH_PORT |
+                HSCH_FLUSH_CTRL_FLUSH_DST |
+                HSCH_FLUSH_CTRL_FLUSH_SRC |
+                HSCH_FLUSH_CTRL_FLUSH_ENA,
+                sparx5,
+                HSCH_FLUSH_CTRL);
+
+       /* 8: Enable dequeuing from the egress queues */
+       spx5_rmw(0,
+                HSCH_PORT_MODE_DEQUEUE_DIS,
+                sparx5,
+                HSCH_PORT_MODE(port->portno));
+
+       /* 9: Wait until flushing is complete */
+       err = sparx5_port_flush_poll(sparx5, port->portno);
+       if (err)
+               return err;
+
+       /* 10: Reset the  MAC clock domain */
+       if (high_spd_dev) {
+               spx5_inst_rmw(DEV10G_DEV_RST_CTRL_PCS_TX_RST_SET(1) |
+                             DEV10G_DEV_RST_CTRL_MAC_RX_RST_SET(1) |
+                             DEV10G_DEV_RST_CTRL_MAC_TX_RST_SET(1),
+                             DEV10G_DEV_RST_CTRL_PCS_TX_RST |
+                             DEV10G_DEV_RST_CTRL_MAC_RX_RST |
+                             DEV10G_DEV_RST_CTRL_MAC_TX_RST,
+                             devinst,
+                             DEV10G_DEV_RST_CTRL(0));
+
+       } else {
+               spx5_inst_rmw(DEV2G5_DEV_RST_CTRL_SPEED_SEL_SET(3) |
+                             DEV2G5_DEV_RST_CTRL_PCS_TX_RST_SET(1) |
+                             DEV2G5_DEV_RST_CTRL_PCS_RX_RST_SET(1) |
+                             DEV2G5_DEV_RST_CTRL_MAC_TX_RST_SET(1) |
+                             DEV2G5_DEV_RST_CTRL_MAC_RX_RST_SET(1),
+                             DEV2G5_DEV_RST_CTRL_SPEED_SEL |
+                             DEV2G5_DEV_RST_CTRL_PCS_TX_RST |
+                             DEV2G5_DEV_RST_CTRL_PCS_RX_RST |
+                             DEV2G5_DEV_RST_CTRL_MAC_TX_RST |
+                             DEV2G5_DEV_RST_CTRL_MAC_RX_RST,
+                             devinst,
+                             DEV2G5_DEV_RST_CTRL(0));
+       }
+       /* 11: Clear flushing */
+       spx5_rmw(HSCH_FLUSH_CTRL_FLUSH_PORT_SET(port->portno) |
+                HSCH_FLUSH_CTRL_FLUSH_ENA_SET(0),
+                HSCH_FLUSH_CTRL_FLUSH_PORT |
+                HSCH_FLUSH_CTRL_FLUSH_ENA,
+                sparx5,
+                HSCH_FLUSH_CTRL);
+
+       if (high_spd_dev) {
+               u32 pcs = sparx5_to_pcs_dev(port->portno);
+               void __iomem *pcsinst = spx5_inst_get(sparx5, pcs, tinst);
+
+               /* 12: Disable 5G/10G/25G BaseR PCS */
+               spx5_inst_rmw(PCS10G_BR_PCS_CFG_PCS_ENA_SET(0),
+                             PCS10G_BR_PCS_CFG_PCS_ENA,
+                             pcsinst,
+                             PCS10G_BR_PCS_CFG(0));
+
+               if (sparx5_port_is_25g(port->portno))
+                       /* Disable 25G PCS */
+                       spx5_rmw(DEV25G_PCS25G_CFG_PCS25G_ENA_SET(0),
+                                DEV25G_PCS25G_CFG_PCS25G_ENA,
+                                sparx5,
+                                DEV25G_PCS25G_CFG(tinst));
+       } else {
+               /* 12: Disable 1G PCS */
+               spx5_rmw(DEV2G5_PCS1G_CFG_PCS_ENA_SET(0),
+                        DEV2G5_PCS1G_CFG_PCS_ENA,
+                        sparx5,
+                        DEV2G5_PCS1G_CFG(port->portno));
+       }
+
+       /* The port is now flushed and disabled  */
+       return 0;
+}
+
+static int sparx5_port_fifo_sz(struct sparx5 *sparx5,
+                              u32 portno, u32 speed)
+{
+       u32 sys_clk = sparx5_clk_period(sparx5->coreclock);
+       const u32 taxi_dist[SPX5_PORTS_ALL] = {
+               6, 8, 10, 6, 8, 10, 6, 8, 10, 6, 8, 10,
+               4, 4, 4, 4,
+               11, 12, 13, 14, 15, 16, 17, 18,
+               11, 12, 13, 14, 15, 16, 17, 18,
+               11, 12, 13, 14, 15, 16, 17, 18,
+               11, 12, 13, 14, 15, 16, 17, 18,
+               4, 6, 8, 4, 6, 8, 6, 8,
+               2, 2, 2, 2, 2, 2, 2, 4, 2
+       };
+       u32 mac_per    = 6400, tmp1, tmp2, tmp3;
+       u32 fifo_width = 16;
+       u32 mac_width  = 8;
+       u32 addition   = 0;
+
+       switch (speed) {
+       case SPEED_25000:
+               return 0;
+       case SPEED_10000:
+               mac_per = 6400;
+               mac_width = 8;
+               addition = 1;
+               break;
+       case SPEED_5000:
+               mac_per = 12800;
+               mac_width = 8;
+               addition = 0;
+               break;
+       case SPEED_2500:
+               mac_per = 3200;
+               mac_width = 1;
+               addition = 0;
+               break;
+       case SPEED_1000:
+               mac_per =  8000;
+               mac_width = 1;
+               addition = 0;
+               break;
+       case SPEED_100:
+       case SPEED_10:
+               return 1;
+       default:
+               break;
+       }
+
+       tmp1 = 1000 * mac_width / fifo_width;
+       tmp2 = 3000 + ((12000 + 2 * taxi_dist[portno] * 1000)
+                      * sys_clk / mac_per);
+       tmp3 = tmp1 * tmp2 / 1000;
+       return (tmp3 + 2000 + 999) / 1000 + addition;
+}
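Worked example of the FIFO sizing arithmetic for one configuration, assuming a 1600 ps core clock period (625 MHz, as the forwarding-urgency helper below also assumes) and the 10G parameters from the switch statement above; the taxi distance is picked from the table for illustration:

#include <stdio.h>

int main(void)
{
        unsigned int sys_clk = 1600;    /* ps, 625 MHz core clock */
        unsigned int taxi_dist = 8;     /* e.g. port 1, from the table */
        /* SPEED_10000 parameters from the switch statement */
        unsigned int mac_per = 6400, mac_width = 8, addition = 1;
        unsigned int fifo_width = 16;
        unsigned int tmp1, tmp2, tmp3;

        tmp1 = 1000 * mac_width / fifo_width;
        tmp2 = 3000 + ((12000 + 2 * taxi_dist * 1000) * sys_clk / mac_per);
        tmp3 = tmp1 * tmp2 / 1000;
        printf("fifo_sz=%u\n", (tmp3 + 2000 + 999) / 1000 + addition);
        return 0;
}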
+
+/* Configure port muxing:
+ * QSGMII:     4x2G5 devices
+ */
+static int sparx5_port_mux_set(struct sparx5 *sparx5,
+                              struct sparx5_port *port,
+                              struct sparx5_port_config *conf)
+{
+       u32 portno = port->portno;
+       u32 inst;
+
+       if (port->conf.portmode == conf->portmode)
+               return 0; /* Nothing to do */
+
+       switch (conf->portmode) {
+       case PHY_INTERFACE_MODE_QSGMII: /* QSGMII: 4x2G5 devices. Mode Q'  */
+               inst = (portno - portno % 4) / 4;
+               spx5_rmw(BIT(inst),
+                        BIT(inst),
+                        sparx5,
+                        PORT_CONF_QSGMII_ENA);
+
+               if ((portno / 4 % 2) == 0) {
+                       /* Affects d0-d3,d8-d11..d40-d43 */
+                       spx5_rmw(PORT_CONF_USGMII_CFG_BYPASS_SCRAM_SET(1) |
+                                PORT_CONF_USGMII_CFG_BYPASS_DESCRAM_SET(1) |
+                                PORT_CONF_USGMII_CFG_QUAD_MODE_SET(1),
+                                PORT_CONF_USGMII_CFG_BYPASS_SCRAM |
+                                PORT_CONF_USGMII_CFG_BYPASS_DESCRAM |
+                                PORT_CONF_USGMII_CFG_QUAD_MODE,
+                                sparx5,
+                                PORT_CONF_USGMII_CFG((portno / 8)));
+               }
+               break;
+       default:
+               break;
+       }
+       return 0;
+}
+
+static int sparx5_port_max_tags_set(struct sparx5 *sparx5,
+                                   struct sparx5_port *port)
+{
+       enum sparx5_port_max_tags max_tags    = port->max_vlan_tags;
+       int tag_ct          = max_tags == SPX5_PORT_MAX_TAGS_ONE ? 1 :
+                             max_tags == SPX5_PORT_MAX_TAGS_TWO ? 2 : 0;
+       bool dtag           = max_tags == SPX5_PORT_MAX_TAGS_TWO;
+       enum sparx5_vlan_port_type vlan_type  = port->vlan_type;
+       bool dotag          = max_tags != SPX5_PORT_MAX_TAGS_NONE;
+       u32 dev             = sparx5_to_high_dev(port->portno);
+       u32 tinst           = sparx5_port_dev_index(port->portno);
+       void __iomem *inst  = spx5_inst_get(sparx5, dev, tinst);
+       u32 etype;
+
+       etype = (vlan_type == SPX5_VLAN_PORT_TYPE_S_CUSTOM ?
+                port->custom_etype :
+                vlan_type == SPX5_VLAN_PORT_TYPE_C ?
+                SPX5_ETYPE_TAG_C : SPX5_ETYPE_TAG_S);
+
+       spx5_wr(DEV2G5_MAC_TAGS_CFG_TAG_ID_SET(etype) |
+               DEV2G5_MAC_TAGS_CFG_PB_ENA_SET(dtag) |
+               DEV2G5_MAC_TAGS_CFG_VLAN_AWR_ENA_SET(dotag) |
+               DEV2G5_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA_SET(dotag),
+               sparx5,
+               DEV2G5_MAC_TAGS_CFG(port->portno));
+
+       if (sparx5_port_is_2g5(port->portno))
+               return 0;
+
+       spx5_inst_rmw(DEV10G_MAC_TAGS_CFG_TAG_ID_SET(etype) |
+                     DEV10G_MAC_TAGS_CFG_TAG_ENA_SET(dotag),
+                     DEV10G_MAC_TAGS_CFG_TAG_ID |
+                     DEV10G_MAC_TAGS_CFG_TAG_ENA,
+                     inst,
+                     DEV10G_MAC_TAGS_CFG(0, 0));
+
+       spx5_inst_rmw(DEV10G_MAC_NUM_TAGS_CFG_NUM_TAGS_SET(tag_ct),
+                     DEV10G_MAC_NUM_TAGS_CFG_NUM_TAGS,
+                     inst,
+                     DEV10G_MAC_NUM_TAGS_CFG(0));
+
+       spx5_inst_rmw(DEV10G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK_SET(dotag),
+                     DEV10G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK,
+                     inst,
+                     DEV10G_MAC_MAXLEN_CFG(0));
+       return 0;
+}
+
+static int sparx5_port_fwd_urg(struct sparx5 *sparx5, u32 speed)
+{
+       u32 clk_period_ps = 1600; /* 625 MHz for now */
+       u32 urg = 672000;
+
+       switch (speed) {
+       case SPEED_10:
+       case SPEED_100:
+       case SPEED_1000:
+               urg = 672000;
+               break;
+       case SPEED_2500:
+               urg = 270000;
+               break;
+       case SPEED_5000:
+               urg = 135000;
+               break;
+       case SPEED_10000:
+               urg = 67200;
+               break;
+       case SPEED_25000:
+               urg = 27000;
+               break;
+       }
+       return urg / clk_period_ps - 1;
+}
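The forwarding-urgency values divide out the clock period and subtract one; at the 1600 ps period assumed above this yields the following register values (a standalone check of the arithmetic only):

#include <stdio.h>

int main(void)
{
        const unsigned int clk_period_ps = 1600;        /* 625 MHz */
        const unsigned int urg[] = { 672000, 270000, 135000, 67200, 27000 };
        const char *spd[] = { "<=1G", "2.5G", "5G", "10G", "25G" };
        unsigned int i;

        for (i = 0; i < 5; i++)
                printf("%-4s: fwd_urgency=%u\n", spd[i],
                       urg[i] / clk_period_ps - 1);
        return 0;
}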
+
+static u16 sparx5_wm_enc(u16 value)
+{
+       if (value >= 2048)
+               return 2048 + value / 16;
+
+       return value;
+}
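The watermark encoding stores values below 2048 directly and larger values in units of 16 cells with an offset of 2048. A standalone sketch of the round trip, where the decode helper is an assumption added for illustration (large values lose their low four bits):

#include <stdint.h>
#include <stdio.h>

static uint16_t wm_enc(uint16_t value)
{
        if (value >= 2048)
                return 2048 + value / 16;
        return value;
}

static uint32_t wm_dec(uint16_t enc)
{
        if (enc >= 2048)
                return (enc - 2048) * 16;
        return enc;
}

int main(void)
{
        const uint16_t v[] = { 100, 2047, 2051, 4096 };
        unsigned int i;

        for (i = 0; i < 4; i++)
                printf("%u -> enc %u -> dec %u\n",
                       v[i], wm_enc(v[i]), wm_dec(wm_enc(v[i])));
        return 0;
}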
+
+static int sparx5_port_fc_setup(struct sparx5 *sparx5,
+                               struct sparx5_port *port,
+                               struct sparx5_port_config *conf)
+{
+       bool fc_obey = conf->pause & MLO_PAUSE_RX ? 1 : 0;
+       u32 pause_stop = 0xFFF - 1; /* FC gen disabled */
+
+       if (conf->pause & MLO_PAUSE_TX)
+               pause_stop = sparx5_wm_enc(4 * (ETH_MAXLEN /
+                                               SPX5_BUFFER_CELL_SZ));
+
+       /* Set HDX flowcontrol */
+       spx5_rmw(DSM_MAC_CFG_HDX_BACKPREASSURE_SET(conf->duplex == DUPLEX_HALF),
+                DSM_MAC_CFG_HDX_BACKPREASSURE,
+                sparx5,
+                DSM_MAC_CFG(port->portno));
+
+       /* Obey flowcontrol  */
+       spx5_rmw(DSM_RX_PAUSE_CFG_RX_PAUSE_EN_SET(fc_obey),
+                DSM_RX_PAUSE_CFG_RX_PAUSE_EN,
+                sparx5,
+                DSM_RX_PAUSE_CFG(port->portno));
+
+       /* Disable forward pressure */
+       spx5_rmw(QSYS_FWD_PRESSURE_FWD_PRESSURE_DIS_SET(fc_obey),
+                QSYS_FWD_PRESSURE_FWD_PRESSURE_DIS,
+                sparx5,
+                QSYS_FWD_PRESSURE(port->portno));
+
+       /* Generate pause frames */
+       spx5_rmw(QSYS_PAUSE_CFG_PAUSE_STOP_SET(pause_stop),
+                QSYS_PAUSE_CFG_PAUSE_STOP,
+                sparx5,
+                QSYS_PAUSE_CFG(port->portno));
+
+       return 0;
+}
+
+static u16 sparx5_get_aneg_word(struct sparx5_port_config *conf)
+{
+       if (conf->portmode == PHY_INTERFACE_MODE_1000BASEX) /* cl-37 aneg */
+               return (conf->pause_adv | ADVERTISE_LPACK | ADVERTISE_1000XFULL);
+       else
+               return 1; /* Enable SGMII Aneg */
+}
+
+int sparx5_serdes_set(struct sparx5 *sparx5,
+                     struct sparx5_port *port,
+                     struct sparx5_port_config *conf)
+{
+       int portmode, err, speed = conf->speed;
+
+       if (conf->portmode == PHY_INTERFACE_MODE_QSGMII &&
+           ((port->portno % 4) != 0)) {
+               return 0;
+       }
+       if (sparx5_is_baser(conf->portmode)) {
+               if (conf->portmode == PHY_INTERFACE_MODE_25GBASER)
+                       speed = SPEED_25000;
+               else if (conf->portmode == PHY_INTERFACE_MODE_10GBASER)
+                       speed = SPEED_10000;
+               else
+                       speed = SPEED_5000;
+       }
+
+       err = phy_set_media(port->serdes, conf->media);
+       if (err)
+               return err;
+       if (speed > 0) {
+               err = phy_set_speed(port->serdes, speed);
+               if (err)
+                       return err;
+       }
+       if (conf->serdes_reset) {
+               err = phy_reset(port->serdes);
+               if (err)
+                       return err;
+       }
+
+       /* Configure SerDes with port parameters
+        * For BaseR, the serdes driver supports 10GBASE-R at speeds 5G/10G/25G
+        */
+       portmode = conf->portmode;
+       if (sparx5_is_baser(conf->portmode))
+               portmode = PHY_INTERFACE_MODE_10GBASER;
+       err = phy_set_mode_ext(port->serdes, PHY_MODE_ETHERNET, portmode);
+       if (err)
+               return err;
+       conf->serdes_reset = false;
+       return err;
+}
+
+static int sparx5_port_pcs_low_set(struct sparx5 *sparx5,
+                                  struct sparx5_port *port,
+                                  struct sparx5_port_config *conf)
+{
+       bool sgmii = false, inband_aneg = false;
+       int err;
+
+       if (port->conf.inband) {
+               if (conf->portmode == PHY_INTERFACE_MODE_SGMII ||
+                   conf->portmode == PHY_INTERFACE_MODE_QSGMII)
+                       inband_aneg = true; /* Cisco-SGMII in-band-aneg */
+               else if (conf->portmode == PHY_INTERFACE_MODE_1000BASEX &&
+                        conf->autoneg)
+                       inband_aneg = true; /* Clause-37 in-band-aneg */
+
+               err = sparx5_serdes_set(sparx5, port, conf);
+               if (err)
+                       return -EINVAL;
+       } else {
+               sgmii = true; /* PHY is connected to the MAC */
+       }
+
+       /* Choose SGMII or 1000BaseX/2500BaseX PCS mode */
+       spx5_rmw(DEV2G5_PCS1G_MODE_CFG_SGMII_MODE_ENA_SET(sgmii),
+                DEV2G5_PCS1G_MODE_CFG_SGMII_MODE_ENA,
+                sparx5,
+                DEV2G5_PCS1G_MODE_CFG(port->portno));
+
+       /* Enable PCS */
+       spx5_wr(DEV2G5_PCS1G_CFG_PCS_ENA_SET(1),
+               sparx5,
+               DEV2G5_PCS1G_CFG(port->portno));
+
+       if (inband_aneg) {
+               u16 abil = sparx5_get_aneg_word(conf);
+
+               /* Enable in-band aneg */
+               spx5_wr(DEV2G5_PCS1G_ANEG_CFG_ADV_ABILITY_SET(abil) |
+                       DEV2G5_PCS1G_ANEG_CFG_SW_RESOLVE_ENA_SET(1) |
+                       DEV2G5_PCS1G_ANEG_CFG_ANEG_ENA_SET(1) |
+                       DEV2G5_PCS1G_ANEG_CFG_ANEG_RESTART_ONE_SHOT_SET(1),
+                       sparx5,
+                       DEV2G5_PCS1G_ANEG_CFG(port->portno));
+       } else {
+               spx5_wr(0, sparx5, DEV2G5_PCS1G_ANEG_CFG(port->portno));
+       }
+
+       /* Take PCS out of reset */
+       spx5_rmw(DEV2G5_DEV_RST_CTRL_SPEED_SEL_SET(2) |
+                DEV2G5_DEV_RST_CTRL_PCS_TX_RST_SET(0) |
+                DEV2G5_DEV_RST_CTRL_PCS_RX_RST_SET(0),
+                DEV2G5_DEV_RST_CTRL_SPEED_SEL |
+                DEV2G5_DEV_RST_CTRL_PCS_TX_RST |
+                DEV2G5_DEV_RST_CTRL_PCS_RX_RST,
+                sparx5,
+                DEV2G5_DEV_RST_CTRL(port->portno));
+
+       return 0;
+}
+
+static int sparx5_port_pcs_high_set(struct sparx5 *sparx5,
+                                   struct sparx5_port *port,
+                                   struct sparx5_port_config *conf)
+{
+       u32 clk_spd = conf->portmode == PHY_INTERFACE_MODE_5GBASER ? 1 : 0;
+       u32 pix = sparx5_port_dev_index(port->portno);
+       u32 dev = sparx5_to_high_dev(port->portno);
+       u32 pcs = sparx5_to_pcs_dev(port->portno);
+       void __iomem *devinst;
+       void __iomem *pcsinst;
+       int err;
+
+       devinst = spx5_inst_get(sparx5, dev, pix);
+       pcsinst = spx5_inst_get(sparx5, pcs, pix);
+
+       /* SFI: No in-band aneg. Speeds 5G/10G/25G */
+       err = sparx5_serdes_set(sparx5, port, conf);
+       if (err)
+               return -EINVAL;
+       if (conf->portmode == PHY_INTERFACE_MODE_25GBASER) {
+               /* Enable PCS for 25G device, speed 25G */
+               spx5_rmw(DEV25G_PCS25G_CFG_PCS25G_ENA_SET(1),
+                        DEV25G_PCS25G_CFG_PCS25G_ENA,
+                        sparx5,
+                        DEV25G_PCS25G_CFG(pix));
+       } else {
+               /* Enable PCS for 5G/10G/25G devices, speed 5G/10G */
+               spx5_inst_rmw(PCS10G_BR_PCS_CFG_PCS_ENA_SET(1),
+                             PCS10G_BR_PCS_CFG_PCS_ENA,
+                             pcsinst,
+                             PCS10G_BR_PCS_CFG(0));
+       }
+
+       /* Enable 5G/10G/25G MAC module */
+       spx5_inst_wr(DEV10G_MAC_ENA_CFG_RX_ENA_SET(1) |
+                    DEV10G_MAC_ENA_CFG_TX_ENA_SET(1),
+                    devinst,
+                    DEV10G_MAC_ENA_CFG(0));
+
+       /* Take the device out of reset */
+       spx5_inst_rmw(DEV10G_DEV_RST_CTRL_PCS_RX_RST_SET(0) |
+                     DEV10G_DEV_RST_CTRL_PCS_TX_RST_SET(0) |
+                     DEV10G_DEV_RST_CTRL_MAC_RX_RST_SET(0) |
+                     DEV10G_DEV_RST_CTRL_MAC_TX_RST_SET(0) |
+                     DEV10G_DEV_RST_CTRL_SPEED_SEL_SET(clk_spd),
+                     DEV10G_DEV_RST_CTRL_PCS_RX_RST |
+                     DEV10G_DEV_RST_CTRL_PCS_TX_RST |
+                     DEV10G_DEV_RST_CTRL_MAC_RX_RST |
+                     DEV10G_DEV_RST_CTRL_MAC_TX_RST |
+                     DEV10G_DEV_RST_CTRL_SPEED_SEL,
+                     devinst,
+                     DEV10G_DEV_RST_CTRL(0));
+
+       return 0;
+}
+
+/* Switch between 1G/2500 and 5G/10G/25G devices */
+static void sparx5_dev_switch(struct sparx5 *sparx5, int port, bool hsd)
+{
+       int bt_indx = BIT(sparx5_port_dev_index(port));
+
+       if (sparx5_port_is_5g(port)) {
+               spx5_rmw(hsd ? 0 : bt_indx,
+                        bt_indx,
+                        sparx5,
+                        PORT_CONF_DEV5G_MODES);
+       } else if (sparx5_port_is_10g(port)) {
+               spx5_rmw(hsd ? 0 : bt_indx,
+                        bt_indx,
+                        sparx5,
+                        PORT_CONF_DEV10G_MODES);
+       } else if (sparx5_port_is_25g(port)) {
+               spx5_rmw(hsd ? 0 : bt_indx,
+                        bt_indx,
+                        sparx5,
+                        PORT_CONF_DEV25G_MODES);
+       }
+}
+
+/* Configure speed/duplex dependent registers */
+static int sparx5_port_config_low_set(struct sparx5 *sparx5,
+                                     struct sparx5_port *port,
+                                     struct sparx5_port_config *conf)
+{
+       u32 clk_spd, gig_mode, tx_gap, hdx_gap_1, hdx_gap_2;
+       bool fdx = conf->duplex == DUPLEX_FULL;
+       int spd = conf->speed;
+
+       clk_spd = spd == SPEED_10 ? 0 : spd == SPEED_100 ? 1 : 2;
+       gig_mode = spd == SPEED_1000 || spd == SPEED_2500;
+       tx_gap = spd == SPEED_1000 ? 4 : fdx ? 6 : 5;
+       hdx_gap_1 = spd == SPEED_1000 ? 0 : spd == SPEED_100 ? 1 : 2;
+       hdx_gap_2 = spd == SPEED_1000 ? 0 : spd == SPEED_100 ? 4 : 1;
+
+       /* GIG/FDX mode */
+       spx5_rmw(DEV2G5_MAC_MODE_CFG_GIGA_MODE_ENA_SET(gig_mode) |
+                DEV2G5_MAC_MODE_CFG_FDX_ENA_SET(fdx),
+                DEV2G5_MAC_MODE_CFG_GIGA_MODE_ENA |
+                DEV2G5_MAC_MODE_CFG_FDX_ENA,
+                sparx5,
+                DEV2G5_MAC_MODE_CFG(port->portno));
+
+       /* Set MAC IFG Gaps */
+       spx5_wr(DEV2G5_MAC_IFG_CFG_TX_IFG_SET(tx_gap) |
+               DEV2G5_MAC_IFG_CFG_RX_IFG1_SET(hdx_gap_1) |
+               DEV2G5_MAC_IFG_CFG_RX_IFG2_SET(hdx_gap_2),
+               sparx5,
+               DEV2G5_MAC_IFG_CFG(port->portno));
+
+       /* Disabling frame aging when in HDX (due to HDX issue) */
+       spx5_rmw(HSCH_PORT_MODE_AGE_DIS_SET(fdx == 0),
+                HSCH_PORT_MODE_AGE_DIS,
+                sparx5,
+                HSCH_PORT_MODE(port->portno));
+
+       /* Enable MAC module */
+       spx5_wr(DEV2G5_MAC_ENA_CFG_RX_ENA |
+               DEV2G5_MAC_ENA_CFG_TX_ENA,
+               sparx5,
+               DEV2G5_MAC_ENA_CFG(port->portno));
+
+       /* Select speed and take MAC out of reset */
+       spx5_rmw(DEV2G5_DEV_RST_CTRL_SPEED_SEL_SET(clk_spd) |
+                DEV2G5_DEV_RST_CTRL_MAC_TX_RST_SET(0) |
+                DEV2G5_DEV_RST_CTRL_MAC_RX_RST_SET(0),
+                DEV2G5_DEV_RST_CTRL_SPEED_SEL |
+                DEV2G5_DEV_RST_CTRL_MAC_TX_RST |
+                DEV2G5_DEV_RST_CTRL_MAC_RX_RST,
+                sparx5,
+                DEV2G5_DEV_RST_CTRL(port->portno));
+
+       return 0;
+}
+
+int sparx5_port_pcs_set(struct sparx5 *sparx5,
+                       struct sparx5_port *port,
+                       struct sparx5_port_config *conf)
+{
+       bool high_speed_dev = sparx5_is_baser(conf->portmode);
+       int err;
+
+       if (sparx5_dev_change(sparx5, port, conf)) {
+               /* switch device */
+               sparx5_dev_switch(sparx5, port->portno, high_speed_dev);
+
+               /* Disable the not-in-use device */
+               err = sparx5_port_disable(sparx5, port, !high_speed_dev);
+               if (err)
+                       return err;
+       }
+       /* Disable the port before re-configuring */
+       err = sparx5_port_disable(sparx5, port, high_speed_dev);
+       if (err)
+               return -EINVAL;
+
+       if (high_speed_dev)
+               err = sparx5_port_pcs_high_set(sparx5, port, conf);
+       else
+               err = sparx5_port_pcs_low_set(sparx5, port, conf);
+
+       if (err)
+               return -EINVAL;
+
+       if (port->conf.inband) {
+               /* Enable/disable 1G counters in ASM */
+               spx5_rmw(ASM_PORT_CFG_CSC_STAT_DIS_SET(high_speed_dev),
+                        ASM_PORT_CFG_CSC_STAT_DIS,
+                        sparx5,
+                        ASM_PORT_CFG(port->portno));
+
+               /* Enable/disable 1G counters in DSM */
+               spx5_rmw(DSM_BUF_CFG_CSC_STAT_DIS_SET(high_speed_dev),
+                        DSM_BUF_CFG_CSC_STAT_DIS,
+                        sparx5,
+                        DSM_BUF_CFG(port->portno));
+       }
+
+       port->conf = *conf;
+
+       return 0;
+}
+
+int sparx5_port_config(struct sparx5 *sparx5,
+                      struct sparx5_port *port,
+                      struct sparx5_port_config *conf)
+{
+       bool high_speed_dev = sparx5_is_baser(conf->portmode);
+       int err, urgency, stop_wm;
+
+       err = sparx5_port_verify_speed(sparx5, port, conf);
+       if (err)
+               return err;
+
+       /* high speed device is already configured */
+       if (!high_speed_dev)
+               sparx5_port_config_low_set(sparx5, port, conf);
+
+       /* Configure flow control */
+       err = sparx5_port_fc_setup(sparx5, port, conf);
+       if (err)
+               return err;
+
+       /* Set the DSM stop watermark */
+       stop_wm = sparx5_port_fifo_sz(sparx5, port->portno, conf->speed);
+       spx5_rmw(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM_SET(stop_wm),
+                DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM,
+                sparx5,
+                DSM_DEV_TX_STOP_WM_CFG(port->portno));
+
+       /* Enable port in queue system */
+       urgency = sparx5_port_fwd_urg(sparx5, conf->speed);
+       spx5_rmw(QFWD_SWITCH_PORT_MODE_PORT_ENA_SET(1) |
+                QFWD_SWITCH_PORT_MODE_FWD_URGENCY_SET(urgency),
+                QFWD_SWITCH_PORT_MODE_PORT_ENA |
+                QFWD_SWITCH_PORT_MODE_FWD_URGENCY,
+                sparx5,
+                QFWD_SWITCH_PORT_MODE(port->portno));
+
+       /* Save the new values */
+       port->conf = *conf;
+
+       return 0;
+}
+
+/* Initialize port config to default */
+int sparx5_port_init(struct sparx5 *sparx5,
+                    struct sparx5_port *port,
+                    struct sparx5_port_config *conf)
+{
+       u32 pause_start = sparx5_wm_enc(6  * (ETH_MAXLEN / SPX5_BUFFER_CELL_SZ));
+       u32 atop = sparx5_wm_enc(20 * (ETH_MAXLEN / SPX5_BUFFER_CELL_SZ));
+       u32 devhigh = sparx5_to_high_dev(port->portno);
+       u32 pix = sparx5_port_dev_index(port->portno);
+       u32 pcs = sparx5_to_pcs_dev(port->portno);
+       bool sd_pol = port->signd_active_high;
+       bool sd_sel = !port->signd_internal;
+       bool sd_ena = port->signd_enable;
+       u32 pause_stop = 0xFFF - 1; /* FC generate disabled */
+       void __iomem *devinst;
+       void __iomem *pcsinst;
+       int err;
+
+       devinst = spx5_inst_get(sparx5, devhigh, pix);
+       pcsinst = spx5_inst_get(sparx5, pcs, pix);
+
+       /* Set the mux port mode  */
+       err = sparx5_port_mux_set(sparx5, port, conf);
+       if (err)
+               return err;
+
+       /* Configure MAC vlan awareness */
+       err = sparx5_port_max_tags_set(sparx5, port);
+       if (err)
+               return err;
+
+       /* Set Max Length */
+       spx5_rmw(DEV2G5_MAC_MAXLEN_CFG_MAX_LEN_SET(ETH_MAXLEN),
+                DEV2G5_MAC_MAXLEN_CFG_MAX_LEN,
+                sparx5,
+                DEV2G5_MAC_MAXLEN_CFG(port->portno));
+
+       /* 1G/2G5: Signal Detect configuration */
+       spx5_wr(DEV2G5_PCS1G_SD_CFG_SD_POL_SET(sd_pol) |
+               DEV2G5_PCS1G_SD_CFG_SD_SEL_SET(sd_sel) |
+               DEV2G5_PCS1G_SD_CFG_SD_ENA_SET(sd_ena),
+               sparx5,
+               DEV2G5_PCS1G_SD_CFG(port->portno));
+
+       /* Set Pause WM hysteresis */
+       spx5_rmw(QSYS_PAUSE_CFG_PAUSE_START_SET(pause_start) |
+                QSYS_PAUSE_CFG_PAUSE_STOP_SET(pause_stop) |
+                QSYS_PAUSE_CFG_PAUSE_ENA_SET(1),
+                QSYS_PAUSE_CFG_PAUSE_START |
+                QSYS_PAUSE_CFG_PAUSE_STOP |
+                QSYS_PAUSE_CFG_PAUSE_ENA,
+                sparx5,
+                QSYS_PAUSE_CFG(port->portno));
+
+       /* Port ATOP. Frames are tail dropped when this WM is hit */
+       spx5_wr(QSYS_ATOP_ATOP_SET(atop),
+               sparx5,
+               QSYS_ATOP(port->portno));
+
+       /* Discard pause frame 01-80-C2-00-00-01 */
+       spx5_wr(PAUSE_DISCARD, sparx5, ANA_CL_CAPTURE_BPDU_CFG(port->portno));
+
+       if (conf->portmode == PHY_INTERFACE_MODE_QSGMII ||
+           conf->portmode == PHY_INTERFACE_MODE_SGMII) {
+               err = sparx5_serdes_set(sparx5, port, conf);
+               if (err)
+                       return err;
+
+               if (!sparx5_port_is_2g5(port->portno))
+                       /* Enable shadow device */
+                       spx5_rmw(DSM_DEV_TX_STOP_WM_CFG_DEV10G_SHADOW_ENA_SET(1),
+                                DSM_DEV_TX_STOP_WM_CFG_DEV10G_SHADOW_ENA,
+                                sparx5,
+                                DSM_DEV_TX_STOP_WM_CFG(port->portno));
+
+               sparx5_dev_switch(sparx5, port->portno, false);
+       }
+       if (conf->portmode == PHY_INTERFACE_MODE_QSGMII) {
+               /* All ports must be PCS enabled in QSGMII mode */
+               spx5_rmw(DEV2G5_DEV_RST_CTRL_PCS_TX_RST_SET(0),
+                        DEV2G5_DEV_RST_CTRL_PCS_TX_RST,
+                        sparx5,
+                        DEV2G5_DEV_RST_CTRL(port->portno));
+       }
+       /* Default IFGs for 1G */
+       spx5_wr(DEV2G5_MAC_IFG_CFG_TX_IFG_SET(6) |
+               DEV2G5_MAC_IFG_CFG_RX_IFG1_SET(0) |
+               DEV2G5_MAC_IFG_CFG_RX_IFG2_SET(0),
+               sparx5,
+               DEV2G5_MAC_IFG_CFG(port->portno));
+
+       if (sparx5_port_is_2g5(port->portno))
+               return 0; /* Low speed device only - return */
+
+       /* Now setup the high speed device */
+       if (conf->portmode == PHY_INTERFACE_MODE_NA)
+               conf->portmode = PHY_INTERFACE_MODE_10GBASER;
+
+       if (sparx5_is_baser(conf->portmode))
+               sparx5_dev_switch(sparx5, port->portno, true);
+
+       /* Set Max Length */
+       spx5_inst_rmw(DEV10G_MAC_MAXLEN_CFG_MAX_LEN_SET(ETH_MAXLEN),
+                     DEV10G_MAC_MAXLEN_CFG_MAX_LEN,
+                     devinst,
+                     DEV10G_MAC_MAXLEN_CFG(0));
+
+       /* Handle Signal Detect in 10G PCS */
+       spx5_inst_wr(PCS10G_BR_PCS_SD_CFG_SD_POL_SET(sd_pol) |
+                    PCS10G_BR_PCS_SD_CFG_SD_SEL_SET(sd_sel) |
+                    PCS10G_BR_PCS_SD_CFG_SD_ENA_SET(sd_ena),
+                    pcsinst,
+                    PCS10G_BR_PCS_SD_CFG(0));
+
+       if (sparx5_port_is_25g(port->portno)) {
+               /* Handle Signal Detect in 25G PCS */
+               spx5_wr(DEV25G_PCS25G_SD_CFG_SD_POL_SET(sd_pol) |
+                       DEV25G_PCS25G_SD_CFG_SD_SEL_SET(sd_sel) |
+                       DEV25G_PCS25G_SD_CFG_SD_ENA_SET(sd_ena),
+                       sparx5,
+                       DEV25G_PCS25G_SD_CFG(pix));
+       }
+
+       return 0;
+}
+
+void sparx5_port_enable(struct sparx5_port *port, bool enable)
+{
+       struct sparx5 *sparx5 = port->sparx5;
+
+       /* Enable port for frame transfer? */
+       spx5_rmw(QFWD_SWITCH_PORT_MODE_PORT_ENA_SET(enable),
+                QFWD_SWITCH_PORT_MODE_PORT_ENA,
+                sparx5,
+                QFWD_SWITCH_PORT_MODE(port->portno));
+}
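
Note: nearly every register access in the port code above goes through spx5_rmw()/spx5_wr() with generated _SET() field encoders and matching field masks. As a rough illustration, here is a minimal userspace sketch of that read-modify-write idiom; the SPEED_SEL_* names and the plain uint32_t standing in for an MMIO register are hypothetical, not the driver's API:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical 4-bit field at bits [7:4] of a 32-bit register */
    #define SPEED_SEL_MASK   (0xfu << 4)
    #define SPEED_SEL_SET(v) (((uint32_t)(v) << 4) & SPEED_SEL_MASK)

    /* Read-modify-write: only the bits covered by 'mask' change,
     * which is what spx5_rmw() does against real hardware.
     */
    static void rmw(uint32_t *reg, uint32_t val, uint32_t mask)
    {
            *reg = (*reg & ~mask) | (val & mask);
    }

    int main(void)
    {
            uint32_t reg = 0xffffffff;      /* stand-in for an MMIO register */

            rmw(&reg, SPEED_SEL_SET(2), SPEED_SEL_MASK);
            printf("reg = 0x%08x\n", reg);  /* 0xffffff2f: only bits 7:4 changed */
            return 0;
    }

Masked writes leave the other fields of a shared register untouched, which is why the driver can flip the PCS, MAC and speed-select bits of DEV_RST_CTRL independently.
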
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_port.h b/drivers/net/ethernet/microchip/sparx5/sparx5_port.h
new file mode 100644 (file)
index 0000000..fd05ab6
--- /dev/null
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Microchip Sparx5 Switch driver
+ *
+ * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#ifndef __SPARX5_PORT_H__
+#define __SPARX5_PORT_H__
+
+#include "sparx5_main.h"
+
+static inline bool sparx5_port_is_2g5(int portno)
+{
+       return portno >= 16 && portno <= 47;
+}
+
+static inline bool sparx5_port_is_5g(int portno)
+{
+       return portno <= 11 || portno == 64;
+}
+
+static inline bool sparx5_port_is_10g(int portno)
+{
+       return (portno >= 12 && portno <= 15) || (portno >= 48 && portno <= 55);
+}
+
+static inline bool sparx5_port_is_25g(int portno)
+{
+       return portno >= 56 && portno <= 63;
+}
+
+static inline u32 sparx5_to_high_dev(int port)
+{
+       if (sparx5_port_is_5g(port))
+               return TARGET_DEV5G;
+       if (sparx5_port_is_10g(port))
+               return TARGET_DEV10G;
+       return TARGET_DEV25G;
+}
+
+static inline u32 sparx5_to_pcs_dev(int port)
+{
+       if (sparx5_port_is_5g(port))
+               return TARGET_PCS5G_BR;
+       if (sparx5_port_is_10g(port))
+               return TARGET_PCS10G_BR;
+       return TARGET_PCS25G_BR;
+}
+
+static inline int sparx5_port_dev_index(int port)
+{
+       if (sparx5_port_is_2g5(port))
+               return port;
+       if (sparx5_port_is_5g(port))
+               return (port <= 11 ? port : 12);
+       if (sparx5_port_is_10g(port))
+               return (port >= 12 && port <= 15) ?
+                       port - 12 : port - 44;
+       return (port - 56);
+}
+
+int sparx5_port_init(struct sparx5 *sparx5,
+                    struct sparx5_port *spx5_port,
+                    struct sparx5_port_config *conf);
+
+int sparx5_port_config(struct sparx5 *sparx5,
+                      struct sparx5_port *spx5_port,
+                      struct sparx5_port_config *conf);
+
+int sparx5_port_pcs_set(struct sparx5 *sparx5,
+                       struct sparx5_port *port,
+                       struct sparx5_port_config *conf);
+
+int sparx5_serdes_set(struct sparx5 *sparx5,
+                     struct sparx5_port *spx5_port,
+                     struct sparx5_port_config *conf);
+
+struct sparx5_port_status {
+       bool link;
+       bool link_down;
+       int  speed;
+       bool an_complete;
+       int  duplex;
+       int  pause;
+};
+
+int sparx5_get_port_status(struct sparx5 *sparx5,
+                          struct sparx5_port *port,
+                          struct sparx5_port_status *status);
+
+void sparx5_port_enable(struct sparx5_port *port, bool enable);
+
+#endif /* __SPARX5_PORT_H__ */
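
The inline helpers in this header carve the 65 front ports into 2G5/5G/10G/25G ranges and fold each range onto a zero-based index within the matching device target. A standalone sketch reproducing that mapping, useful for sanity-checking the ranges (same arithmetic as the header, no driver types assumed):

    #include <stdio.h>
    #include <stdbool.h>

    static bool is_2g5(int p) { return p >= 16 && p <= 47; }
    static bool is_5g(int p)  { return p <= 11 || p == 64; }
    static bool is_10g(int p) { return (p >= 12 && p <= 15) || (p >= 48 && p <= 55); }

    /* Mirrors sparx5_port_dev_index(): index within the device target */
    static int dev_index(int p)
    {
            if (is_2g5(p))
                    return p;
            if (is_5g(p))
                    return p <= 11 ? p : 12;  /* port 64 is the 13th 5G device */
            if (is_10g(p))
                    return (p >= 12 && p <= 15) ? p - 12 : p - 44;
            return p - 56;                    /* 25G ports 56..63 map to 0..7 */
    }

    int main(void)
    {
            const int samples[] = { 0, 11, 12, 15, 16, 47, 48, 55, 56, 63, 64 };
            unsigned int i;

            for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
                    printf("port %2d -> dev index %d\n",
                           samples[i], dev_index(samples[i]));
            return 0;
    }
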
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c b/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c
new file mode 100644 (file)
index 0000000..19c7cb7
--- /dev/null
@@ -0,0 +1,508 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip Sparx5 Switch driver
+ *
+ * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#include <linux/if_bridge.h>
+#include <net/switchdev.h>
+
+#include "sparx5_main_regs.h"
+#include "sparx5_main.h"
+
+static struct workqueue_struct *sparx5_owq;
+
+struct sparx5_switchdev_event_work {
+       struct work_struct work;
+       struct switchdev_notifier_fdb_info fdb_info;
+       struct net_device *dev;
+       unsigned long event;
+};
+
+static void sparx5_port_attr_bridge_flags(struct sparx5_port *port,
+                                         struct switchdev_brport_flags flags)
+{
+       if (flags.mask & BR_MCAST_FLOOD)
+               sparx5_pgid_update_mask(port, PGID_MC_FLOOD, true);
+}
+
+static void sparx5_attr_stp_state_set(struct sparx5_port *port,
+                                     u8 state)
+{
+       struct sparx5 *sparx5 = port->sparx5;
+
+       if (!test_bit(port->portno, sparx5->bridge_mask)) {
+               netdev_err(port->ndev,
+                          "Controlling non-bridged port %d?\n", port->portno);
+               return;
+       }
+
+       switch (state) {
+       case BR_STATE_FORWARDING:
+               set_bit(port->portno, sparx5->bridge_fwd_mask);
+               fallthrough;
+       case BR_STATE_LEARNING:
+               set_bit(port->portno, sparx5->bridge_lrn_mask);
+               break;
+
+       default:
+               /* All other states treated as blocking */
+               clear_bit(port->portno, sparx5->bridge_fwd_mask);
+               clear_bit(port->portno, sparx5->bridge_lrn_mask);
+               break;
+       }
+
+       /* apply the bridge_fwd_mask to all the ports */
+       sparx5_update_fwd(sparx5);
+}
+
+static void sparx5_port_attr_ageing_set(struct sparx5_port *port,
+                                       unsigned long ageing_clock_t)
+{
+       unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
+       u32 ageing_time = jiffies_to_msecs(ageing_jiffies);
+
+       sparx5_set_ageing(port->sparx5, ageing_time);
+}
+
+static int sparx5_port_attr_set(struct net_device *dev,
+                               const struct switchdev_attr *attr,
+                               struct netlink_ext_ack *extack)
+{
+       struct sparx5_port *port = netdev_priv(dev);
+
+       switch (attr->id) {
+       case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
+               sparx5_port_attr_bridge_flags(port, attr->u.brport_flags);
+               break;
+       case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
+               sparx5_attr_stp_state_set(port, attr->u.stp_state);
+               break;
+       case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
+               sparx5_port_attr_ageing_set(port, attr->u.ageing_time);
+               break;
+       case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
+               port->vlan_aware = attr->u.vlan_filtering;
+               sparx5_vlan_port_apply(port->sparx5, port);
+               break;
+       default:
+               return -EOPNOTSUPP;
+       }
+
+       return 0;
+}
+
+static int sparx5_port_bridge_join(struct sparx5_port *port,
+                                  struct net_device *bridge)
+{
+       struct sparx5 *sparx5 = port->sparx5;
+
+       if (bitmap_empty(sparx5->bridge_mask, SPX5_PORTS))
+               /* First bridged port */
+               sparx5->hw_bridge_dev = bridge;
+       else
+               if (sparx5->hw_bridge_dev != bridge)
+                       /* Adding the port to a second bridge is
+                        * unsupported
+                        */
+                       return -ENODEV;
+
+       set_bit(port->portno, sparx5->bridge_mask);
+
+       /* When the port enters bridge mode, multicast frames no longer need
+        * to be copied to the CPU unless the bridge requests them
+        */
+       __dev_mc_unsync(port->ndev, sparx5_mc_unsync);
+
+       return 0;
+}
+
+static void sparx5_port_bridge_leave(struct sparx5_port *port,
+                                    struct net_device *bridge)
+{
+       struct sparx5 *sparx5 = port->sparx5;
+
+       clear_bit(port->portno, sparx5->bridge_mask);
+       if (bitmap_empty(sparx5->bridge_mask, SPX5_PORTS))
+               sparx5->hw_bridge_dev = NULL;
+
+       /* Clear bridge vlan settings before updating the port settings */
+       port->vlan_aware = 0;
+       port->pvid = NULL_VID;
+       port->vid = NULL_VID;
+
+       /* The port returns to host mode, therefore restore the mc list */
+       __dev_mc_sync(port->ndev, sparx5_mc_sync, sparx5_mc_unsync);
+}
+
+static int sparx5_port_changeupper(struct net_device *dev,
+                                  struct netdev_notifier_changeupper_info *info)
+{
+       struct sparx5_port *port = netdev_priv(dev);
+       int err = 0;
+
+       if (netif_is_bridge_master(info->upper_dev)) {
+               if (info->linking)
+                       err = sparx5_port_bridge_join(port, info->upper_dev);
+               else
+                       sparx5_port_bridge_leave(port, info->upper_dev);
+
+               sparx5_vlan_port_apply(port->sparx5, port);
+       }
+
+       return err;
+}
+
+static int sparx5_port_add_addr(struct net_device *dev, bool up)
+{
+       struct sparx5_port *port = netdev_priv(dev);
+       struct sparx5 *sparx5 = port->sparx5;
+       u16 vid = port->pvid;
+
+       if (up)
+               sparx5_mact_learn(sparx5, PGID_CPU, port->ndev->dev_addr, vid);
+       else
+               sparx5_mact_forget(sparx5, port->ndev->dev_addr, vid);
+
+       return 0;
+}
+
+static int sparx5_netdevice_port_event(struct net_device *dev,
+                                      struct notifier_block *nb,
+                                      unsigned long event, void *ptr)
+{
+       int err = 0;
+
+       if (!sparx5_netdevice_check(dev))
+               return 0;
+
+       switch (event) {
+       case NETDEV_CHANGEUPPER:
+               err = sparx5_port_changeupper(dev, ptr);
+               break;
+       case NETDEV_PRE_UP:
+               err = sparx5_port_add_addr(dev, true);
+               break;
+       case NETDEV_DOWN:
+               err = sparx5_port_add_addr(dev, false);
+               break;
+       }
+
+       return err;
+}
+
+static int sparx5_netdevice_event(struct notifier_block *nb,
+                                 unsigned long event, void *ptr)
+{
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+       int ret = 0;
+
+       ret = sparx5_netdevice_port_event(dev, nb, event, ptr);
+
+       return notifier_from_errno(ret);
+}
+
+static void sparx5_switchdev_bridge_fdb_event_work(struct work_struct *work)
+{
+       struct sparx5_switchdev_event_work *switchdev_work =
+               container_of(work, struct sparx5_switchdev_event_work, work);
+       struct net_device *dev = switchdev_work->dev;
+       struct switchdev_notifier_fdb_info *fdb_info;
+       struct sparx5_port *port;
+       struct sparx5 *sparx5;
+
+       rtnl_lock();
+       if (!sparx5_netdevice_check(dev))
+               goto out;
+
+       port = netdev_priv(dev);
+       sparx5 = port->sparx5;
+
+       fdb_info = &switchdev_work->fdb_info;
+
+       switch (switchdev_work->event) {
+       case SWITCHDEV_FDB_ADD_TO_DEVICE:
+               if (!fdb_info->added_by_user)
+                       break;
+               sparx5_add_mact_entry(sparx5, port, fdb_info->addr,
+                                     fdb_info->vid);
+               break;
+       case SWITCHDEV_FDB_DEL_TO_DEVICE:
+               if (!fdb_info->added_by_user)
+                       break;
+               sparx5_del_mact_entry(sparx5, fdb_info->addr, fdb_info->vid);
+               break;
+       }
+
+out:
+       rtnl_unlock();
+       kfree(switchdev_work->fdb_info.addr);
+       kfree(switchdev_work);
+       dev_put(dev);
+}
+
+static void sparx5_schedule_work(struct work_struct *work)
+{
+       queue_work(sparx5_owq, work);
+}
+
+static int sparx5_switchdev_event(struct notifier_block *unused,
+                                 unsigned long event, void *ptr)
+{
+       struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+       struct sparx5_switchdev_event_work *switchdev_work;
+       struct switchdev_notifier_fdb_info *fdb_info;
+       struct switchdev_notifier_info *info = ptr;
+       int err;
+
+       switch (event) {
+       case SWITCHDEV_PORT_ATTR_SET:
+               err = switchdev_handle_port_attr_set(dev, ptr,
+                                                    sparx5_netdevice_check,
+                                                    sparx5_port_attr_set);
+               return notifier_from_errno(err);
+       case SWITCHDEV_FDB_ADD_TO_DEVICE:
+               fallthrough;
+       case SWITCHDEV_FDB_DEL_TO_DEVICE:
+               switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
+               if (!switchdev_work)
+                       return NOTIFY_BAD;
+
+               switchdev_work->dev = dev;
+               switchdev_work->event = event;
+
+               fdb_info = container_of(info,
+                                       struct switchdev_notifier_fdb_info,
+                                       info);
+               INIT_WORK(&switchdev_work->work,
+                         sparx5_switchdev_bridge_fdb_event_work);
+               memcpy(&switchdev_work->fdb_info, ptr,
+                      sizeof(switchdev_work->fdb_info));
+               switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
+               if (!switchdev_work->fdb_info.addr)
+                       goto err_addr_alloc;
+
+               ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
+                               fdb_info->addr);
+               dev_hold(dev);
+
+               sparx5_schedule_work(&switchdev_work->work);
+               break;
+       }
+
+       return NOTIFY_DONE;
+err_addr_alloc:
+       kfree(switchdev_work);
+       return NOTIFY_BAD;
+}
+
+static void sparx5_sync_port_dev_addr(struct sparx5 *sparx5,
+                                     struct sparx5_port *port,
+                                     u16 vid, bool add)
+{
+       if (!port ||
+           !test_bit(port->portno, sparx5->bridge_mask))
+               return; /* Skip null/host interfaces */
+
+       /* Bridge connects to vid? */
+       if (add) {
+               /* Add port MAC address from the VLAN */
+               sparx5_mact_learn(sparx5, PGID_CPU,
+                                 port->ndev->dev_addr, vid);
+       } else {
+               /* Control port addr visibility depending on
+                * port VLAN connectivity.
+                */
+               if (test_bit(port->portno, sparx5->vlan_mask[vid]))
+                       sparx5_mact_learn(sparx5, PGID_CPU,
+                                         port->ndev->dev_addr, vid);
+               else
+                       sparx5_mact_forget(sparx5,
+                                          port->ndev->dev_addr, vid);
+       }
+}
+
+static void sparx5_sync_bridge_dev_addr(struct net_device *dev,
+                                       struct sparx5 *sparx5,
+                                       u16 vid, bool add)
+{
+       int i;
+
+       /* First, handle the bridge addresses */
+       if (add) {
+               sparx5_mact_learn(sparx5, PGID_CPU, dev->dev_addr,
+                                 vid);
+               sparx5_mact_learn(sparx5, PGID_BCAST, dev->broadcast,
+                                 vid);
+       } else {
+               sparx5_mact_forget(sparx5, dev->dev_addr, vid);
+               sparx5_mact_forget(sparx5, dev->broadcast, vid);
+       }
+
+       /* Now look at bridged ports */
+       for (i = 0; i < SPX5_PORTS; i++)
+               sparx5_sync_port_dev_addr(sparx5, sparx5->ports[i], vid, add);
+}
+
+static int sparx5_handle_port_vlan_add(struct net_device *dev,
+                                      struct notifier_block *nb,
+                                      const struct switchdev_obj_port_vlan *v)
+{
+       struct sparx5_port *port = netdev_priv(dev);
+
+       if (netif_is_bridge_master(dev)) {
+               if (v->flags & BRIDGE_VLAN_INFO_BRENTRY) {
+                       struct sparx5 *sparx5 =
+                               container_of(nb, struct sparx5,
+                                            switchdev_blocking_nb);
+
+                       sparx5_sync_bridge_dev_addr(dev, sparx5, v->vid, true);
+               }
+               return 0;
+       }
+
+       if (!sparx5_netdevice_check(dev))
+               return -EOPNOTSUPP;
+
+       return sparx5_vlan_vid_add(port, v->vid,
+                                 v->flags & BRIDGE_VLAN_INFO_PVID,
+                                 v->flags & BRIDGE_VLAN_INFO_UNTAGGED);
+}
+
+static int sparx5_handle_port_obj_add(struct net_device *dev,
+                                     struct notifier_block *nb,
+                                     struct switchdev_notifier_port_obj_info *info)
+{
+       const struct switchdev_obj *obj = info->obj;
+       int err;
+
+       switch (obj->id) {
+       case SWITCHDEV_OBJ_ID_PORT_VLAN:
+               err = sparx5_handle_port_vlan_add(dev, nb,
+                                                 SWITCHDEV_OBJ_PORT_VLAN(obj));
+               break;
+       default:
+               err = -EOPNOTSUPP;
+               break;
+       }
+
+       info->handled = true;
+       return err;
+}
+
+static int sparx5_handle_port_vlan_del(struct net_device *dev,
+                                      struct notifier_block *nb,
+                                      u16 vid)
+{
+       struct sparx5_port *port = netdev_priv(dev);
+       int ret;
+
+       /* Master bridge? */
+       if (netif_is_bridge_master(dev)) {
+               struct sparx5 *sparx5 =
+                       container_of(nb, struct sparx5,
+                                    switchdev_blocking_nb);
+
+               sparx5_sync_bridge_dev_addr(dev, sparx5, vid, false);
+               return 0;
+       }
+
+       if (!sparx5_netdevice_check(dev))
+               return -EOPNOTSUPP;
+
+       ret = sparx5_vlan_vid_del(port, vid);
+       if (ret)
+               return ret;
+
+       /* Delete the port MAC address with the matching VLAN information */
+       sparx5_mact_forget(port->sparx5, port->ndev->dev_addr, vid);
+
+       return 0;
+}
+
+static int sparx5_handle_port_obj_del(struct net_device *dev,
+                                     struct notifier_block *nb,
+                                     struct switchdev_notifier_port_obj_info *info)
+{
+       const struct switchdev_obj *obj = info->obj;
+       int err;
+
+       switch (obj->id) {
+       case SWITCHDEV_OBJ_ID_PORT_VLAN:
+               err = sparx5_handle_port_vlan_del(dev, nb,
+                                                 SWITCHDEV_OBJ_PORT_VLAN(obj)->vid);
+               break;
+       default:
+               err = -EOPNOTSUPP;
+               break;
+       }
+
+       info->handled = true;
+       return err;
+}
+
+static int sparx5_switchdev_blocking_event(struct notifier_block *nb,
+                                          unsigned long event,
+                                          void *ptr)
+{
+       struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+       int err;
+
+       switch (event) {
+       case SWITCHDEV_PORT_OBJ_ADD:
+               err = sparx5_handle_port_obj_add(dev, nb, ptr);
+               return notifier_from_errno(err);
+       case SWITCHDEV_PORT_OBJ_DEL:
+               err = sparx5_handle_port_obj_del(dev, nb, ptr);
+               return notifier_from_errno(err);
+       case SWITCHDEV_PORT_ATTR_SET:
+               err = switchdev_handle_port_attr_set(dev, ptr,
+                                                    sparx5_netdevice_check,
+                                                    sparx5_port_attr_set);
+               return notifier_from_errno(err);
+       }
+
+       return NOTIFY_DONE;
+}
+
+int sparx5_register_notifier_blocks(struct sparx5 *s5)
+{
+       int err;
+
+       s5->netdevice_nb.notifier_call = sparx5_netdevice_event;
+       err = register_netdevice_notifier(&s5->netdevice_nb);
+       if (err)
+               return err;
+
+       s5->switchdev_nb.notifier_call = sparx5_switchdev_event;
+       err = register_switchdev_notifier(&s5->switchdev_nb);
+       if (err)
+               goto err_switchdev_nb;
+
+       s5->switchdev_blocking_nb.notifier_call = sparx5_switchdev_blocking_event;
+       err = register_switchdev_blocking_notifier(&s5->switchdev_blocking_nb);
+       if (err)
+               goto err_switchdev_blocking_nb;
+
+       sparx5_owq = alloc_ordered_workqueue("sparx5_order", 0);
+       if (!sparx5_owq) {
+               err = -ENOMEM;
+               goto err_switchdev_blocking_nb;
+       }
+
+       return 0;
+
+err_switchdev_blocking_nb:
+       unregister_switchdev_notifier(&s5->switchdev_nb);
+err_switchdev_nb:
+       unregister_netdevice_notifier(&s5->netdevice_nb);
+
+       return err;
+}
+
+void sparx5_unregister_notifier_blocks(struct sparx5 *s5)
+{
+       destroy_workqueue(sparx5_owq);
+
+       unregister_switchdev_blocking_notifier(&s5->switchdev_blocking_nb);
+       unregister_switchdev_notifier(&s5->switchdev_nb);
+       unregister_netdevice_notifier(&s5->netdevice_nb);
+}
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c b/drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c
new file mode 100644 (file)
index 0000000..4ce490a
--- /dev/null
@@ -0,0 +1,224 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip Sparx5 Switch driver
+ *
+ * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#include "sparx5_main_regs.h"
+#include "sparx5_main.h"
+
+static int sparx5_vlant_set_mask(struct sparx5 *sparx5, u16 vid)
+{
+       u32 mask[3];
+
+       /* Divide up mask in 32 bit words */
+       bitmap_to_arr32(mask, sparx5->vlan_mask[vid], SPX5_PORTS);
+
+       /* Output mask to respective registers */
+       spx5_wr(mask[0], sparx5, ANA_L3_VLAN_MASK_CFG(vid));
+       spx5_wr(mask[1], sparx5, ANA_L3_VLAN_MASK_CFG1(vid));
+       spx5_wr(mask[2], sparx5, ANA_L3_VLAN_MASK_CFG2(vid));
+
+       return 0;
+}
+
+void sparx5_vlan_init(struct sparx5 *sparx5)
+{
+       u16 vid;
+
+       spx5_rmw(ANA_L3_VLAN_CTRL_VLAN_ENA_SET(1),
+                ANA_L3_VLAN_CTRL_VLAN_ENA,
+                sparx5,
+                ANA_L3_VLAN_CTRL);
+
+       /* Map VLAN = FID */
+       for (vid = NULL_VID; vid < VLAN_N_VID; vid++)
+               spx5_rmw(ANA_L3_VLAN_CFG_VLAN_FID_SET(vid),
+                        ANA_L3_VLAN_CFG_VLAN_FID,
+                        sparx5,
+                        ANA_L3_VLAN_CFG(vid));
+}
+
+void sparx5_vlan_port_setup(struct sparx5 *sparx5, int portno)
+{
+       struct sparx5_port *port = sparx5->ports[portno];
+
+       /* Configure PVID */
+       spx5_rmw(ANA_CL_VLAN_CTRL_VLAN_AWARE_ENA_SET(0) |
+                ANA_CL_VLAN_CTRL_PORT_VID_SET(port->pvid),
+                ANA_CL_VLAN_CTRL_VLAN_AWARE_ENA |
+                ANA_CL_VLAN_CTRL_PORT_VID,
+                sparx5,
+                ANA_CL_VLAN_CTRL(port->portno));
+}
+
+int sparx5_vlan_vid_add(struct sparx5_port *port, u16 vid, bool pvid,
+                       bool untagged)
+{
+       struct sparx5 *sparx5 = port->sparx5;
+       int ret;
+
+       /* Make the port a member of the VLAN */
+       set_bit(port->portno, sparx5->vlan_mask[vid]);
+       ret = sparx5_vlant_set_mask(sparx5, vid);
+       if (ret)
+               return ret;
+
+       /* Default ingress vlan classification */
+       if (pvid)
+               port->pvid = vid;
+
+       /* Untagged egress vlan classification */
+       if (untagged && port->vid != vid) {
+               if (port->vid) {
+                       netdev_err(port->ndev,
+                                  "Port already has a native VLAN: %d\n",
+                                  port->vid);
+                       return -EBUSY;
+               }
+               port->vid = vid;
+       }
+
+       sparx5_vlan_port_apply(sparx5, port);
+
+       return 0;
+}
+
+int sparx5_vlan_vid_del(struct sparx5_port *port, u16 vid)
+{
+       struct sparx5 *sparx5 = port->sparx5;
+       int ret;
+
+       /* 8021q removes VID 0 on module unload for all interfaces
+        * with VLAN filtering feature. We need to keep it to receive
+        * untagged traffic.
+        */
+       if (vid == 0)
+               return 0;
+
+       /* Stop the port from being a member of the vlan */
+       clear_bit(port->portno, sparx5->vlan_mask[vid]);
+       ret = sparx5_vlant_set_mask(sparx5, vid);
+       if (ret)
+               return ret;
+
+       /* Ingress */
+       if (port->pvid == vid)
+               port->pvid = 0;
+
+       /* Egress */
+       if (port->vid == vid)
+               port->vid = 0;
+
+       sparx5_vlan_port_apply(sparx5, port);
+
+       return 0;
+}
+
+void sparx5_pgid_update_mask(struct sparx5_port *port, int pgid, bool enable)
+{
+       struct sparx5 *sparx5 = port->sparx5;
+       u32 val, mask;
+
+       /* mask is spread across 3 registers x 32 bit */
+       if (port->portno < 32) {
+               mask = BIT(port->portno);
+               val = enable ? mask : 0;
+               spx5_rmw(val, mask, sparx5, ANA_AC_PGID_CFG(pgid));
+       } else if (port->portno < 64) {
+               mask = BIT(port->portno - 32);
+               val = enable ? mask : 0;
+               spx5_rmw(val, mask, sparx5, ANA_AC_PGID_CFG1(pgid));
+       } else if (port->portno < SPX5_PORTS) {
+               mask = BIT(port->portno - 64);
+               val = enable ? mask : 0;
+               spx5_rmw(val, mask, sparx5, ANA_AC_PGID_CFG2(pgid));
+       } else {
+               netdev_err(port->ndev, "Invalid port no: %d\n", port->portno);
+       }
+}
+
+void sparx5_update_fwd(struct sparx5 *sparx5)
+{
+       DECLARE_BITMAP(workmask, SPX5_PORTS);
+       u32 mask[3];
+       int port;
+
+       /* Divide up fwd mask in 32 bit words */
+       bitmap_to_arr32(mask, sparx5->bridge_fwd_mask, SPX5_PORTS);
+
+       /* Update flood masks */
+       for (port = PGID_UC_FLOOD; port <= PGID_BCAST; port++) {
+               spx5_wr(mask[0], sparx5, ANA_AC_PGID_CFG(port));
+               spx5_wr(mask[1], sparx5, ANA_AC_PGID_CFG1(port));
+               spx5_wr(mask[2], sparx5, ANA_AC_PGID_CFG2(port));
+       }
+
+       /* Update SRC masks */
+       for (port = 0; port < SPX5_PORTS; port++) {
+               if (test_bit(port, sparx5->bridge_fwd_mask)) {
+                       /* Allow sending to all bridged ports but self */
+                       bitmap_copy(workmask, sparx5->bridge_fwd_mask, SPX5_PORTS);
+                       clear_bit(port, workmask);
+                       bitmap_to_arr32(mask, workmask, SPX5_PORTS);
+                       spx5_wr(mask[0], sparx5, ANA_AC_SRC_CFG(port));
+                       spx5_wr(mask[1], sparx5, ANA_AC_SRC_CFG1(port));
+                       spx5_wr(mask[2], sparx5, ANA_AC_SRC_CFG2(port));
+               } else {
+                       spx5_wr(0, sparx5, ANA_AC_SRC_CFG(port));
+                       spx5_wr(0, sparx5, ANA_AC_SRC_CFG1(port));
+                       spx5_wr(0, sparx5, ANA_AC_SRC_CFG2(port));
+               }
+       }
+
+       /* Learning enabled only for bridged ports */
+       bitmap_and(workmask, sparx5->bridge_fwd_mask,
+                  sparx5->bridge_lrn_mask, SPX5_PORTS);
+       bitmap_to_arr32(mask, workmask, SPX5_PORTS);
+
+       /* Apply learning mask */
+       spx5_wr(mask[0], sparx5, ANA_L2_AUTO_LRN_CFG);
+       spx5_wr(mask[1], sparx5, ANA_L2_AUTO_LRN_CFG1);
+       spx5_wr(mask[2], sparx5, ANA_L2_AUTO_LRN_CFG2);
+}
+
+void sparx5_vlan_port_apply(struct sparx5 *sparx5,
+                           struct sparx5_port *port)
+{
+       u32 val;
+
+       /* Configure PVID, vlan aware */
+       val = ANA_CL_VLAN_CTRL_VLAN_AWARE_ENA_SET(port->vlan_aware) |
+               ANA_CL_VLAN_CTRL_VLAN_POP_CNT_SET(port->vlan_aware) |
+               ANA_CL_VLAN_CTRL_PORT_VID_SET(port->pvid);
+       spx5_wr(val, sparx5, ANA_CL_VLAN_CTRL(port->portno));
+
+       val = 0;
+       if (port->vlan_aware && !port->pvid)
+               /* If port is vlan-aware and tagged, drop untagged and
+                * priority tagged frames.
+                */
+               val = ANA_CL_VLAN_FILTER_CTRL_TAG_REQUIRED_ENA_SET(1) |
+                       ANA_CL_VLAN_FILTER_CTRL_PRIO_CTAG_DIS_SET(1) |
+                       ANA_CL_VLAN_FILTER_CTRL_PRIO_STAG_DIS_SET(1);
+       spx5_wr(val, sparx5,
+               ANA_CL_VLAN_FILTER_CTRL(port->portno, 0));
+
+       /* Egress configuration (REW_TAG_CFG): VLAN tag type to 8021Q */
+       val = REW_TAG_CTRL_TAG_TPID_CFG_SET(0);
+       if (port->vlan_aware) {
+               if (port->vid)
+                       /* Tag all frames except when VID == DEFAULT_VLAN */
+                       val |= REW_TAG_CTRL_TAG_CFG_SET(1);
+               else
+                       val |= REW_TAG_CTRL_TAG_CFG_SET(3);
+       }
+       spx5_wr(val, sparx5, REW_TAG_CTRL(port->portno));
+
+       /* Egress VID */
+       spx5_rmw(REW_PORT_VLAN_CFG_PORT_VID_SET(port->vid),
+                REW_PORT_VLAN_CFG_PORT_VID,
+                sparx5,
+                REW_PORT_VLAN_CFG(port->portno));
+}
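
With 65 front ports, every per-port mask in this file (VLAN membership, PGID flood masks, per-port source masks, the learning mask) spans three 32-bit registers, which is what the bitmap_to_arr32() calls feed into the ..._CFG/..._CFG1/..._CFG2 triples. A self-contained sketch of that split, using a plain three-word array instead of the kernel's DECLARE_BITMAP():

    #include <stdint.h>
    #include <stdio.h>

    /* Toy port mask: bit (port % 32) of word (port / 32), 65 ports total */
    static void set_port(uint32_t words[3], int port)
    {
            words[port / 32] |= 1u << (port % 32);
    }

    int main(void)
    {
            uint32_t mask[3] = { 0, 0, 0 };  /* maps to CFG, CFG1, CFG2 */

            set_port(mask, 0);
            set_port(mask, 33);
            set_port(mask, 64);  /* the 65th port lands alone in the third word */

            /* A driver would now write mask[0..2] to three consecutive registers */
            printf("CFG=0x%08x CFG1=0x%08x CFG2=0x%08x\n",
                   mask[0], mask[1], mask[2]);
            return 0;
    }
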
index 1c9023d..30e0a10 100644 (file)
@@ -201,6 +201,7 @@ static int gtp_rx(struct pdp_ctx *pctx, struct sk_buff *skb,
         * calculate the transport header.
         */
        skb_reset_network_header(skb);
+       skb_reset_mac_header(skb);
 
        skb->dev = pctx->dev;
 
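
After GTP decapsulation the inner IP packet becomes the new payload, so the network header offset is reset to the current data pointer; the one-line fix above resets the MAC header offset as well, so later code does not act on a stale offset left over from the outer frame. A toy model of that offset bookkeeping, assuming a much-simplified skb with byte offsets in place of the kernel's internals:

    #include <stdio.h>

    /* Minimal stand-in for sk_buff offsets: both headers are recorded as
     * offsets, and 'data' moves forward as outer headers are pulled.
     */
    struct toy_skb {
            int data;            /* current payload start */
            int network_header;  /* offset of the L3 header */
            int mac_header;      /* offset of the L2 header */
    };

    static void reset_network_header(struct toy_skb *s) { s->network_header = s->data; }
    static void reset_mac_header(struct toy_skb *s)     { s->mac_header = s->data; }

    int main(void)
    {
            /* Outer frame: MAC at 0, IP at 14; after pulling the outer
             * headers plus GTP, 'data' points at the inner IP packet.
             */
            struct toy_skb skb = { .data = 50, .network_header = 14, .mac_header = 0 };

            reset_network_header(&skb);
            reset_mac_header(&skb);  /* without this, mac_header still says 0 */
            printf("net=%d mac=%d\n", skb.network_header, skb.mac_header);
            return 0;
    }
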
index ae0bf71..1becb1a 100644 (file)
@@ -7,10 +7,8 @@
  */
 
 #include <linux/acpi.h>
-#include <linux/acpi_mdio.h>
 #include <linux/fwnode_mdio.h>
 #include <linux/of.h>
-#include <linux/of_mdio.h>
 #include <linux/phy.h>
 
 MODULE_AUTHOR("Calvin Johnson <calvin.johnson@oss.nxp.com>");
@@ -144,23 +142,3 @@ int fwnode_mdiobus_register_phy(struct mii_bus *bus,
        return 0;
 }
 EXPORT_SYMBOL(fwnode_mdiobus_register_phy);
-
-/**
- * fwnode_mdiobus_register - bring up all the PHYs on a given MDIO bus and
- *     attach them to it.
- * @bus: Target MDIO bus.
- * @fwnode: Pointer to fwnode of the MDIO controller.
- *
- * Return values are determined accordingly to acpi_/of_ mdiobus_register()
- * operation.
- */
-int fwnode_mdiobus_register(struct mii_bus *bus, struct fwnode_handle *fwnode)
-{
-       if (is_acpi_node(fwnode))
-               return acpi_mdiobus_register(bus, fwnode);
-       else if (is_of_node(fwnode))
-               return of_mdiobus_register(bus, to_of_node(fwnode));
-       else
-               return -EINVAL;
-}
-EXPORT_SYMBOL(fwnode_mdiobus_register);
index 57a5a02..470e1c1 100644 (file)
@@ -74,6 +74,23 @@ MODULE_PARM_DESC (msg_level, "Override default message level");
 
 /*-------------------------------------------------------------------------*/
 
+static const char * const usbnet_event_names[] = {
+       [EVENT_TX_HALT]            = "EVENT_TX_HALT",
+       [EVENT_RX_HALT]            = "EVENT_RX_HALT",
+       [EVENT_RX_MEMORY]          = "EVENT_RX_MEMORY",
+       [EVENT_STS_SPLIT]          = "EVENT_STS_SPLIT",
+       [EVENT_LINK_RESET]         = "EVENT_LINK_RESET",
+       [EVENT_RX_PAUSED]          = "EVENT_RX_PAUSED",
+       [EVENT_DEV_ASLEEP]         = "EVENT_DEV_ASLEEP",
+       [EVENT_DEV_OPEN]           = "EVENT_DEV_OPEN",
+       [EVENT_DEVICE_REPORT_IDLE] = "EVENT_DEVICE_REPORT_IDLE",
+       [EVENT_NO_RUNTIME_PM]      = "EVENT_NO_RUNTIME_PM",
+       [EVENT_RX_KILL]            = "EVENT_RX_KILL",
+       [EVENT_LINK_CHANGE]        = "EVENT_LINK_CHANGE",
+       [EVENT_SET_RX_MODE]        = "EVENT_SET_RX_MODE",
+       [EVENT_NO_IP_ALIGN]        = "EVENT_NO_IP_ALIGN",
+};
+
 /* handles CDC Ethernet and many other network "bulk data" interfaces */
 int usbnet_get_endpoints(struct usbnet *dev, struct usb_interface *intf)
 {
@@ -452,9 +469,9 @@ void usbnet_defer_kevent (struct usbnet *dev, int work)
 {
        set_bit (work, &dev->flags);
        if (!schedule_work (&dev->kevent))
-               netdev_dbg(dev->net, "kevent %d may have been dropped\n", work);
+               netdev_dbg(dev->net, "kevent %s may have been dropped\n", usbnet_event_names[work]);
        else
-               netdev_dbg(dev->net, "kevent %d scheduled\n", work);
+               netdev_dbg(dev->net, "kevent %s scheduled\n", usbnet_event_names[work]);
 }
 EXPORT_SYMBOL_GPL(usbnet_defer_kevent);
 
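
The designated-initializer table keyed by the EVENT_* bit numbers is the usual kernel pattern for turning numeric flags into readable log strings. A standalone sketch of the pattern with a bounds- and hole-checked lookup (toy event numbers, not the usbnet ones; the usbnet code can index directly because 'work' is always a known bit):

    #include <stdio.h>

    enum toy_event { EV_TX_HALT, EV_RX_HALT, EV_LINK_CHANGE, EV_MAX };

    static const char * const toy_event_names[] = {
            [EV_TX_HALT]     = "EV_TX_HALT",
            [EV_RX_HALT]     = "EV_RX_HALT",
            [EV_LINK_CHANGE] = "EV_LINK_CHANGE",
    };

    /* Bounds- and hole-checked lookup so an unknown value cannot crash logging */
    static const char *toy_event_name(int ev)
    {
            if (ev < 0 || ev >= EV_MAX || !toy_event_names[ev])
                    return "EV_UNKNOWN";
            return toy_event_names[ev];
    }

    int main(void)
    {
            printf("kevent %s scheduled\n", toy_event_name(EV_LINK_CHANGE));
            printf("kevent %s may have been dropped\n", toy_event_name(42));
            return 0;
    }
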
index 0416a7e..b0b8145 100644 (file)
@@ -2847,8 +2847,8 @@ static int virtnet_find_vqs(struct virtnet_info *vi)
                        ctx[rxq2vq(i)] = true;
        }
 
-       ret = vi->vdev->config->find_vqs(vi->vdev, total_vqs, vqs, callbacks,
-                                        names, ctx, NULL);
+       ret = virtio_find_vqs_ctx(vi->vdev, total_vqs, vqs, callbacks,
+                                 names, ctx, NULL);
        if (ret)
                goto err_find;
 
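
The virtio-net hunk replaces a raw vdev->config->find_vqs() call with the virtio_find_vqs_ctx() helper: the common pattern of hiding an ops-table pointer behind a small wrapper so call sites stay short and uniform. A sketch of that shape with hypothetical names (not the virtio API):

    #include <stdio.h>

    struct dev;

    /* Ops table: the "config" vtable a transport fills in */
    struct dev_config_ops {
            int (*find_vqs)(struct dev *d, unsigned int nvqs);
    };

    struct dev {
            const struct dev_config_ops *config;
    };

    /* Wrapper: callers say dev_find_vqs(d, n) instead of d->config->find_vqs(...) */
    static inline int dev_find_vqs(struct dev *d, unsigned int nvqs)
    {
            return d->config->find_vqs(d, nvqs);
    }

    static int pci_find_vqs(struct dev *d, unsigned int nvqs)
    {
            (void)d;
            printf("allocating %u virtqueues\n", nvqs);
            return 0;
    }

    static const struct dev_config_ops pci_ops = { .find_vqs = pci_find_vqs };

    int main(void)
    {
            struct dev d = { .config = &pci_ops };

            return dev_find_vqs(&d, 4);
    }
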
index f42bf2c..b8a4bbf 100644 (file)
@@ -2795,7 +2795,7 @@ void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb)
        switch (ar->scan.state) {
        case ATH10K_SCAN_IDLE:
        case ATH10K_SCAN_STARTING:
-               ath10k_warn(ar, "received chan info event without a scan request, ignoring\n");
+               ath10k_dbg(ar, ATH10K_DBG_WMI, "received chan info event without a scan request, ignoring\n");
                goto exit;
        case ATH10K_SCAN_RUNNING:
        case ATH10K_SCAN_ABORTING:
index eb52332..e9b3689 100644 (file)
@@ -1314,10 +1314,16 @@ static void ath11k_peer_assoc_h_he(struct ath11k *ar,
 
        arg->he_flag = true;
 
-       memcpy(&arg->peer_he_cap_macinfo, he_cap->he_cap_elem.mac_cap_info,
-              sizeof(arg->peer_he_cap_macinfo));
-       memcpy(&arg->peer_he_cap_phyinfo, he_cap->he_cap_elem.phy_cap_info,
-              sizeof(arg->peer_he_cap_phyinfo));
+       memcpy_and_pad(&arg->peer_he_cap_macinfo,
+                      sizeof(arg->peer_he_cap_macinfo),
+                      he_cap->he_cap_elem.mac_cap_info,
+                      sizeof(he_cap->he_cap_elem.mac_cap_info),
+                      0);
+       memcpy_and_pad(&arg->peer_he_cap_phyinfo,
+                      sizeof(arg->peer_he_cap_phyinfo),
+                      he_cap->he_cap_elem.phy_cap_info,
+                      sizeof(he_cap->he_cap_elem.phy_cap_info),
+                      0);
        arg->peer_he_ops = vif->bss_conf.he_oper.params;
 
        /* the top most byte is used to indicate BSS color info */
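
memcpy_and_pad() copies at most 'count' bytes and fills any remaining destination bytes with the pad value, so the ath11k peer-assoc fields stay fully initialized even if the source capability arrays are shorter than the destinations. A self-contained re-implementation sketch of those semantics (same argument order as the kernel helper, written from its documented behaviour rather than the kernel source):

    #include <stdio.h>
    #include <string.h>

    /* Sketch of memcpy_and_pad(dest, dest_len, src, count, pad): copy
     * min(dest_len, count) bytes, pad whatever is left of the destination.
     */
    static void my_memcpy_and_pad(void *dest, size_t dest_len,
                                  const void *src, size_t count, int pad)
    {
            size_t n = count < dest_len ? count : dest_len;

            memcpy(dest, src, n);
            if (n < dest_len)
                    memset((char *)dest + n, pad, dest_len - n);
    }

    int main(void)
    {
            unsigned char macinfo[8];
            const unsigned char caps[6] = { 1, 2, 3, 4, 5, 6 };
            size_t i;

            my_memcpy_and_pad(macinfo, sizeof(macinfo), caps, sizeof(caps), 0);
            for (i = 0; i < sizeof(macinfo); i++)
                    printf("%u ", macinfo[i]);  /* 1 2 3 4 5 6 0 0 */
            printf("\n");
            return 0;
    }
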
index f8f6b20..646ad79 100644 (file)
@@ -41,7 +41,7 @@
 static const struct pci_device_id ath11k_pci_id_table[] = {
        { PCI_VDEVICE(QCOM, QCA6390_DEVICE_ID) },
        { PCI_VDEVICE(QCOM, WCN6855_DEVICE_ID) },
-       /* TODO: add QCN9074_DEVICE_ID) once firmware issues are resolved */
+       { PCI_VDEVICE(QCOM, QCN9074_DEVICE_ID) },
        {0}
 };
 
index cf8e52c..0e3be17 100644 (file)
@@ -445,22 +445,12 @@ out:
        return ret;
 }
 
-static void init_hal_msg(struct wcn36xx_hal_msg_header *hdr,
-                        enum wcn36xx_hal_host_msg_type msg_type,
-                        size_t msg_size)
-{
-       memset(hdr, 0, msg_size + sizeof(*hdr));
-       hdr->msg_type = msg_type;
-       hdr->msg_version = WCN36XX_HAL_MSG_VERSION0;
-       hdr->len = msg_size + sizeof(*hdr);
-}
-
 #define __INIT_HAL_MSG(msg_body, type, version) \
        do {                                                            \
-               memset(&msg_body, 0, sizeof(msg_body));                 \
-               msg_body.header.msg_type = type;                        \
-               msg_body.header.msg_version = version;                  \
-               msg_body.header.len = sizeof(msg_body);                 \
+               memset(&(msg_body), 0, sizeof(msg_body));               \
+               (msg_body).header.msg_type = type;                      \
+               (msg_body).header.msg_version = version;                \
+               (msg_body).header.len = sizeof(msg_body);               \
        } while (0)                                                     \
 
 #define INIT_HAL_MSG(msg_body, type)   \
@@ -2729,8 +2719,7 @@ int wcn36xx_smd_set_mc_list(struct wcn36xx *wcn,
 
        msg_body = (struct wcn36xx_hal_rcv_flt_pkt_set_mc_list_req_msg *)
                   wcn->hal_buf;
-       init_hal_msg(&msg_body->header, WCN36XX_HAL_8023_MULTICAST_LIST_REQ,
-                    sizeof(msg_body->mc_addr_list));
+       INIT_HAL_MSG(*msg_body, WCN36XX_HAL_8023_MULTICAST_LIST_REQ);
 
        /* An empty list means all mc traffic will be received */
        if (fp)
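
The wcn36xx change parenthesizes every use of the macro argument precisely so the macro can be handed a dereferenced pointer such as *msg_body: unparenthesized, 'msg_body.header.msg_type' would expand to '*msg_body.header.msg_type', which binds as '*(msg_body.header.msg_type)' and fails to compile. A minimal demonstration with toy types:

    #include <stdio.h>
    #include <string.h>

    struct header { int msg_type; int len; };
    struct msg    { struct header header; int payload; };

    /* Parenthesized version: safe for arguments like '*ptr' */
    #define INIT_MSG(msg_body, type)                                \
            do {                                                    \
                    memset(&(msg_body), 0, sizeof(msg_body));       \
                    (msg_body).header.msg_type = (type);            \
                    (msg_body).header.len = sizeof(msg_body);       \
            } while (0)

    int main(void)
    {
            struct msg storage;
            struct msg *msg_body = &storage;

            INIT_MSG(*msg_body, 7);  /* expands to (*msg_body).header... */
            printf("type=%d len=%d\n", msg_body->header.msg_type,
                   msg_body->header.len);
            return 0;
    }

This is also why the hunk can drop init_hal_msg(): with the argument parenthesized, INIT_HAL_MSG(*msg_body, ...) covers the pointer case too.
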
index 65fb038..cedba56 100644 (file)
@@ -2895,8 +2895,13 @@ brcmf_cfg80211_dump_station(struct wiphy *wiphy, struct net_device *ndev,
                                             &cfg->assoclist,
                                             sizeof(cfg->assoclist));
                if (err) {
-                       bphy_err(drvr, "BRCMF_C_GET_ASSOCLIST unsupported, err=%d\n",
-                                err);
+                       /* GET_ASSOCLIST unsupported by firmware of older chips */
+                       if (err == -EBADE)
+                               bphy_info_once(drvr, "BRCMF_C_GET_ASSOCLIST unsupported\n");
+                       else
+                               bphy_err(drvr, "BRCMF_C_GET_ASSOCLIST failed, err=%d\n",
+                                        err);
+
                        cfg->assoclist.count = 0;
                        return -EOPNOTSUPP;
                }
@@ -6851,7 +6856,12 @@ static int brcmf_setup_wiphybands(struct brcmf_cfg80211_info *cfg)
 
        err = brcmf_fil_iovar_int_get(ifp, "rxchain", &rxchain);
        if (err) {
-               bphy_err(drvr, "rxchain error (%d)\n", err);
+               /* rxchain unsupported by firmware of older chips */
+               if (err == -EBADE)
+                       bphy_info_once(drvr, "rxchain unsupported\n");
+               else
+                       bphy_err(drvr, "rxchain error (%d)\n", err);
+
                nchain = 1;
        } else {
                for (nchain = 0; rxchain; nchain++)
index cee1682..db5f853 100644 (file)
@@ -188,9 +188,14 @@ static void _brcmf_set_multicast_list(struct work_struct *work)
        /*Finally, pick up the PROMISC flag */
        cmd_value = (ndev->flags & IFF_PROMISC) ? true : false;
        err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_PROMISC, cmd_value);
-       if (err < 0)
-               bphy_err(drvr, "Setting BRCMF_C_SET_PROMISC failed, %d\n",
-                        err);
+       if (err < 0) {
+               /* PROMISC unsupported by firmware of older chips */
+               if (err == -EBADE)
+                       bphy_info_once(drvr, "BRCMF_C_SET_PROMISC unsupported\n");
+               else
+                       bphy_err(drvr, "Setting BRCMF_C_SET_PROMISC failed, err=%d\n",
+                                err);
+       }
        brcmf_configure_arp_nd_offload(ifp, !cmd_value);
 }
 
index 44ba6f3..9bb5f70 100644 (file)
@@ -60,6 +60,10 @@ void __brcmf_err(struct brcmf_bus *bus, const char *func, const char *fmt, ...);
                                  ##__VA_ARGS__);                       \
        } while (0)
 
+#define bphy_info_once(drvr, fmt, ...)                                 \
+       wiphy_info_once((drvr)->wiphy, "%s: " fmt, __func__,            \
+                       ##__VA_ARGS__)
+
 #if defined(DEBUG) || defined(CONFIG_BRCM_TRACING)
 
 /* For debug/tracing purposes treat info messages as errors */
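
bphy_info_once() builds on wiphy_info_once(), the kernel's *_once logging pattern: a static flag local to the expansion site latches on first use, so an expected, repeating condition (here, older firmware lacking a feature) is reported exactly once instead of flooding the log. A userspace sketch of the pattern (plain printf standing in for the wiphy logger, and -77 standing in for -EBADE):

    #include <stdio.h>

    /* Log-once: the static flag is private to each expansion site */
    #define info_once(fmt, ...)                             \
            do {                                            \
                    static int warned;                      \
                    if (!warned) {                          \
                            warned = 1;                     \
                            printf(fmt, ##__VA_ARGS__);     \
                    }                                       \
            } while (0)

    static void poll_firmware(int err)
    {
            if (err == -77)  /* stand-in for -EBADE: feature simply absent */
                    info_once("rxchain unsupported\n");
    }

    int main(void)
    {
            int i;

            for (i = 0; i < 5; i++)
                    poll_firmware(-77);  /* prints one line, not five */
            return 0;
    }
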
index a755426..2f7bc3a 100644 (file)
 #include "common.h"
 #include "of.h"
 
+static int brcmf_of_get_country_codes(struct device *dev,
+                                     struct brcmf_mp_device *settings)
+{
+       struct device_node *np = dev->of_node;
+       struct brcmfmac_pd_cc_entry *cce;
+       struct brcmfmac_pd_cc *cc;
+       int count;
+       int i;
+
+       count = of_property_count_strings(np, "brcm,ccode-map");
+       if (count < 0) {
+               /* The property is optional, so return success if it doesn't
+                * exist. Otherwise propagate the error code.
+                */
+               return (count == -EINVAL) ? 0 : count;
+       }
+
+       cc = devm_kzalloc(dev, sizeof(*cc) + count * sizeof(*cce), GFP_KERNEL);
+       if (!cc)
+               return -ENOMEM;
+
+       cc->table_size = count;
+
+       for (i = 0; i < count; i++) {
+               const char *map;
+
+               cce = &cc->table[i];
+
+               if (of_property_read_string_index(np, "brcm,ccode-map",
+                                                 i, &map))
+                       continue;
+
+               /* String format e.g. US-Q2-86 */
+               if (sscanf(map, "%2c-%2c-%d", cce->iso3166, cce->cc,
+                          &cce->rev) != 3)
+                       brcmf_err("failed to read country map %s\n", map);
+               else
+                       brcmf_dbg(INFO, "%s-%s-%d\n", cce->iso3166, cce->cc,
+                                 cce->rev);
+       }
+
+       settings->country_codes = cc;
+
+       return 0;
+}
+
 void brcmf_of_probe(struct device *dev, enum brcmf_bus_type bus_type,
                    struct brcmf_mp_device *settings)
 {
        struct brcmfmac_sdio_pd *sdio = &settings->bus.sdio;
        struct device_node *root, *np = dev->of_node;
        int irq;
+       int err;
        u32 irqf;
        u32 val;
 
@@ -43,8 +90,14 @@ void brcmf_of_probe(struct device *dev, enum brcmf_bus_type bus_type,
                of_node_put(root);
        }
 
-       if (!np || bus_type != BRCMF_BUSTYPE_SDIO ||
-           !of_device_is_compatible(np, "brcm,bcm4329-fmac"))
+       if (!np || !of_device_is_compatible(np, "brcm,bcm4329-fmac"))
+               return;
+
+       err = brcmf_of_get_country_codes(dev, settings);
+       if (err)
+               brcmf_err("failed to get OF country code map (err=%d)\n", err);
+
+       if (bus_type != BRCMF_BUSTYPE_SDIO)
                return;
 
        if (of_property_read_u32(np, "brcm,drive-strength", &val) == 0)
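
The "brcm,ccode-map" entries parsed above are fixed-format strings like US-Q2-86, and the sscanf() format "%2c-%2c-%d" pulls out the two country codes and the revision. Note that %2c copies exactly two characters without a terminating NUL, so the destination buffers must already be zeroed; in the driver the devm_kzalloc() of the table guarantees that. A standalone sketch of the same parse:

    #include <stdio.h>

    int main(void)
    {
            /* One spare byte each, pre-zeroed, since %2c does not NUL-terminate */
            char iso3166[3] = { 0 };
            char cc[3] = { 0 };
            int rev;
            const char *map = "US-Q2-86";

            if (sscanf(map, "%2c-%2c-%d", iso3166, cc, &rev) != 3) {
                    fprintf(stderr, "failed to read country map %s\n", map);
                    return 1;
            }
            printf("%s-%s-%d\n", iso3166, cc, rev);
            return 0;
    }
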
index 14b0db2..d86918d 100644 (file)
@@ -16,9 +16,10 @@ iwlwifi-objs         += iwl-trans.o
 iwlwifi-objs           += queue/tx.o
 
 iwlwifi-objs           += fw/img.o fw/notif-wait.o
-iwlwifi-objs           += fw/dbg.o fw/pnvm.o
+iwlwifi-objs           += fw/dbg.o fw/pnvm.o fw/dump.o
 iwlwifi-$(CONFIG_IWLMVM) += fw/paging.o fw/smem.o fw/init.o
 iwlwifi-$(CONFIG_ACPI) += fw/acpi.o
+iwlwifi-$(CONFIG_EFI)  += fw/uefi.o
 iwlwifi-$(CONFIG_IWLWIFI_DEBUGFS) += fw/debugfs.o
 
 iwlwifi-objs += $(iwlwifi-m)
index c2315de..7f1faa9 100644 (file)
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
 /*
  * Copyright (C) 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2020 Intel Corporation
+ * Copyright (C) 2018-2021 Intel Corporation
  */
 #include <linux/module.h>
 #include <linux/stringify.h>
@@ -9,7 +9,7 @@
 #include "iwl-prph.h"
 
 /* Highest firmware API version supported */
-#define IWL_22000_UCODE_API_MAX        63
+#define IWL_22000_UCODE_API_MAX        64
 
 /* Lowest firmware API version supported */
 #define IWL_22000_UCODE_API_MIN        39
@@ -47,6 +47,7 @@
 #define IWL_MA_A_GF_A_FW_PRE           "iwlwifi-ma-a0-gf-a0-"
 #define IWL_MA_A_GF4_A_FW_PRE          "iwlwifi-ma-a0-gf4-a0-"
 #define IWL_MA_A_MR_A_FW_PRE           "iwlwifi-ma-a0-mr-a0-"
+#define IWL_MA_A_FM_A_FW_PRE           "iwlwifi-ma-a0-fm-a0-"
 #define IWL_SNJ_A_MR_A_FW_PRE          "iwlwifi-SoSnj-a0-mr-a0-"
 #define IWL_BZ_A_HR_B_FW_PRE           "iwlwifi-bz-a0-hr-b0-"
 #define IWL_BZ_A_GF_A_FW_PRE           "iwlwifi-bz-a0-gf-a0-"
@@ -93,6 +94,8 @@
        IWL_MA_A_GF4_A_FW_PRE __stringify(api) ".ucode"
 #define IWL_MA_A_MR_A_FW_MODULE_FIRMWARE(api) \
        IWL_MA_A_MR_A_FW_PRE __stringify(api) ".ucode"
+#define IWL_MA_A_FM_A_FW_MODULE_FIRMWARE(api)          \
+       IWL_MA_A_FM_A_FW_PRE __stringify(api) ".ucode"
 #define IWL_SNJ_A_MR_A_MODULE_FIRMWARE(api) \
        IWL_SNJ_A_MR_A_FW_PRE __stringify(api) ".ucode"
 #define IWL_BZ_A_HR_B_MODULE_FIRMWARE(api) \
@@ -389,6 +392,7 @@ const char iwl_ax201_name[] = "Intel(R) Wi-Fi 6 AX201 160MHz";
 const char iwl_ax203_name[] = "Intel(R) Wi-Fi 6 AX203";
 const char iwl_ax211_name[] = "Intel(R) Wi-Fi 6E AX211 160MHz";
 const char iwl_ax221_name[] = "Intel(R) Wi-Fi 6E AX221 160MHz";
+const char iwl_ax231_name[] = "Intel(R) Wi-Fi 6E AX231 160MHz";
 const char iwl_ax411_name[] = "Intel(R) Wi-Fi 6E AX411 160MHz";
 
 const char iwl_ax200_killer_1650w_name[] =
@@ -724,6 +728,13 @@ const struct iwl_cfg iwl_cfg_ma_a0_mr_a0 = {
        .num_rbds = IWL_NUM_RBDS_AX210_HE,
 };
 
+const struct iwl_cfg iwl_cfg_ma_a0_fm_a0 = {
+       .fw_name_pre = IWL_MA_A_FM_A_FW_PRE,
+       .uhb_supported = true,
+       IWL_DEVICE_AX210,
+       .num_rbds = IWL_NUM_RBDS_AX210_HE,
+};
+
 const struct iwl_cfg iwl_cfg_snj_a0_mr_a0 = {
        .fw_name_pre = IWL_SNJ_A_MR_A_FW_PRE,
        .uhb_supported = true,
@@ -797,6 +808,7 @@ MODULE_FIRMWARE(IWL_MA_A_HR_B_FW_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL_MA_A_GF_A_FW_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL_MA_A_GF4_A_FW_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL_MA_A_MR_A_FW_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_MA_A_FM_A_FW_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL_SNJ_A_MR_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL_BZ_A_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL_BZ_A_GF_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
index df12973..871533b 100644 (file)
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
 /*
  * Copyright (C) 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2020 Intel Corporation
+ * Copyright (C) 2018-2021 Intel Corporation
  */
 #include <linux/module.h>
 #include <linux/stringify.h>
@@ -171,8 +171,12 @@ const char iwl9260_killer_1550_name[] =
        "Killer (R) Wireless-AC 1550 Wireless Network Adapter (9260NGW) 160MHz";
 const char iwl9560_killer_1550i_name[] =
        "Killer (R) Wireless-AC 1550i Wireless Network Adapter (9560NGW)";
+const char iwl9560_killer_1550i_160_name[] =
+       "Killer(R) Wireless-AC 1550i Wireless Network Adapter (9560NGW) 160MHz";
 const char iwl9560_killer_1550s_name[] =
        "Killer (R) Wireless-AC 1550s Wireless Network Adapter (9560NGW)";
+const char iwl9560_killer_1550s_160_name[] =
+       "Killer(R) Wireless-AC 1550s Wireless Network Adapter (9560D2W) 160MHz";
 
 const struct iwl_cfg iwl9260_2ac_cfg = {
        .fw_name_pre = IWL9260_FW_PRE,
index e31bba8..34933f1 100644 (file)
@@ -163,6 +163,27 @@ int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func,
 }
 IWL_EXPORT_SYMBOL(iwl_acpi_get_dsm_u8);
 
+/*
+ * Evaluate a DSM with no arguments and a u32 return value.
+ */
+int iwl_acpi_get_dsm_u32(struct device *dev, int rev, int func,
+                        const guid_t *guid, u32 *value)
+{
+       int ret;
+       u64 val;
+
+       ret = iwl_acpi_get_dsm_integer(dev, rev, func,
+                                      guid, &val, sizeof(u32));
+
+       if (ret < 0)
+               return ret;
+
+       /* cast val (u64) down to u32 */
+       *value = (u32)val;
+       return 0;
+}
+IWL_EXPORT_SYMBOL(iwl_acpi_get_dsm_u32);
+
 union acpi_object *iwl_acpi_get_wifi_pkg(struct device *dev,
                                         union acpi_object *data,
                                         int data_size, int *tbl_rev)
@@ -696,68 +717,37 @@ int iwl_sar_geo_init(struct iwl_fw_runtime *fwrt,
 }
 IWL_EXPORT_SYMBOL(iwl_sar_geo_init);
 
-static u32 iwl_acpi_eval_dsm_func(struct device *dev, enum iwl_dsm_funcs_rev_0 eval_func)
-{
-       union acpi_object *obj;
-       u32 ret;
-
-       obj = iwl_acpi_get_dsm_object(dev, 0,
-                                     eval_func, NULL,
-                                     &iwl_guid);
-
-       if (IS_ERR(obj)) {
-               IWL_DEBUG_DEV_RADIO(dev,
-                                   "ACPI: DSM func '%d': Got Error in obj = %ld\n",
-                                   eval_func,
-                                   PTR_ERR(obj));
-               return 0;
-       }
-
-       if (obj->type != ACPI_TYPE_INTEGER) {
-               IWL_DEBUG_DEV_RADIO(dev,
-                                   "ACPI: DSM func '%d' did not return a valid object, type=%d\n",
-                                   eval_func,
-                                   obj->type);
-               ret = 0;
-               goto out;
-       }
-
-       ret = obj->integer.value;
-       IWL_DEBUG_DEV_RADIO(dev,
-                           "ACPI: DSM method evaluated: func='%d', ret=%d\n",
-                           eval_func,
-                           ret);
-out:
-       ACPI_FREE(obj);
-       return ret;
-}
-
 __le32 iwl_acpi_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt)
 {
-       u32 ret;
+       int ret;
+       u8 value;
        __le32 config_bitmap = 0;
 
        /*
         ** Evaluate func 'DSM_FUNC_ENABLE_INDONESIA_5G2'
         */
-       ret = iwl_acpi_eval_dsm_func(fwrt->dev, DSM_FUNC_ENABLE_INDONESIA_5G2);
+       ret = iwl_acpi_get_dsm_u8(fwrt->dev, 0,
+                                 DSM_FUNC_ENABLE_INDONESIA_5G2,
+                                 &iwl_guid, &value);
 
-       if (ret == DSM_VALUE_INDONESIA_ENABLE)
+       if (!ret && value == DSM_VALUE_INDONESIA_ENABLE)
                config_bitmap |=
                        cpu_to_le32(LARI_CONFIG_ENABLE_5G2_IN_INDONESIA_MSK);
 
        /*
         ** Evaluate func 'DSM_FUNC_DISABLE_SRD'
         */
-       ret = iwl_acpi_eval_dsm_func(fwrt->dev, DSM_FUNC_DISABLE_SRD);
-
-       if (ret == DSM_VALUE_SRD_PASSIVE)
-               config_bitmap |=
-                       cpu_to_le32(LARI_CONFIG_CHANGE_ETSI_TO_PASSIVE_MSK);
-
-       else if (ret == DSM_VALUE_SRD_DISABLE)
-               config_bitmap |=
-                       cpu_to_le32(LARI_CONFIG_CHANGE_ETSI_TO_DISABLED_MSK);
+       ret = iwl_acpi_get_dsm_u8(fwrt->dev, 0,
+                                 DSM_FUNC_DISABLE_SRD,
+                                 &iwl_guid, &value);
+       if (!ret) {
+               if (value == DSM_VALUE_SRD_PASSIVE)
+                       config_bitmap |=
+                               cpu_to_le32(LARI_CONFIG_CHANGE_ETSI_TO_PASSIVE_MSK);
+               else if (value == DSM_VALUE_SRD_DISABLE)
+                       config_bitmap |=
+                               cpu_to_le32(LARI_CONFIG_CHANGE_ETSI_TO_DISABLED_MSK);
+       }
 
        return config_bitmap;
 }
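The refactor above drops the open-coded `iwl_acpi_eval_dsm_func()` in favor of `iwl_acpi_get_dsm_u8()`, which lets callers separate "call failed" from "call returned a value". A compressed userspace model of that ret/value pattern (the DSM stub and the mask value are invented for illustration):

/* Userspace model of the pattern above: only fold the DSM value
 * into the bitmap when the helper reports success. */
#include <stdio.h>
#include <stdint.h>

#define DSM_VALUE_INDONESIA_ENABLE 1
/* illustrative mask, not the driver's LARI_CONFIG_* constant */
#define ENABLE_5G2_IN_INDONESIA_MSK (1u << 2)

/* stand-in for iwl_acpi_get_dsm_u8(); returns 0 on success */
static int get_dsm_u8(int func, uint8_t *value)
{
        (void)func;
        *value = DSM_VALUE_INDONESIA_ENABLE; /* pretend the BIOS says enable */
        return 0;
}

int main(void)
{
        uint32_t config_bitmap = 0;
        uint8_t value;
        int ret;

        ret = get_dsm_u8(2 /* DSM_FUNC_ENABLE_INDONESIA_5G2 */, &value);
        if (!ret && value == DSM_VALUE_INDONESIA_ENABLE)
                config_bitmap |= ENABLE_5G2_IN_INDONESIA_MSK;

        printf("config_bitmap = 0x%08x\n", config_bitmap);
        return 0;
}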
index d16e6ec..b858e99 100644 (file)
@@ -78,6 +78,7 @@ enum iwl_dsm_funcs_rev_0 {
        DSM_FUNC_DISABLE_SRD = 1,
        DSM_FUNC_ENABLE_INDONESIA_5G2 = 2,
        DSM_FUNC_11AX_ENABLEMENT = 6,
+       DSM_FUNC_ENABLE_UNII4_CHAN = 7
 };
 
 enum iwl_dsm_values_srd {
@@ -116,6 +117,9 @@ void *iwl_acpi_get_object(struct device *dev, acpi_string method);
 int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func,
                        const guid_t *guid, u8 *value);
 
+int iwl_acpi_get_dsm_u32(struct device *dev, int rev, int func,
+                        const guid_t *guid, u32 *value);
+
 union acpi_object *iwl_acpi_get_wifi_pkg(struct device *dev,
                                         union acpi_object *data,
                                         int data_size, int *tbl_rev);
@@ -182,6 +186,12 @@ static inline int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func,
        return -ENOENT;
 }
 
+static inline int iwl_acpi_get_dsm_u32(struct device *dev, int rev, int func,
+                                      const guid_t *guid, u32 *value)
+{
+       return -ENOENT;
+}
+
 static inline union acpi_object *iwl_acpi_get_wifi_pkg(struct device *dev,
                                                       union acpi_object *data,
                                                       int data_size,
index c625d31..ce060c3 100644 (file)
@@ -534,11 +534,6 @@ enum iwl_legacy_cmds {
         */
        OFFLOADS_QUERY_CMD = 0xd5,
 
-       /**
-        * @REMOTE_WAKE_CONFIG_CMD: &struct iwl_wowlan_remote_wake_config
-        */
-       REMOTE_WAKE_CONFIG_CMD = 0xd6,
-
        /**
         * @D0I3_END_CMD: End D0i3/D3 state, no command data
         */
index 7586390..b2e7ef3 100644 (file)
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
 /*
- * Copyright (C) 2012-2014, 2018-2020 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
  * Copyright (C) 2013-2014 Intel Mobile Communications GmbH
  * Copyright (C) 2015-2017 Intel Deutschland GmbH
  */
@@ -159,6 +159,22 @@ struct iwl_proto_offload_cmd_v3_large {
        struct iwl_ns_config ns_config[IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3L];
 } __packed; /* PROT_OFFLOAD_CONFIG_CMD_DB_S_VER_3 */
 
+/**
+ * struct iwl_proto_offload_cmd_v4 - ARP/NS offload configuration
+ * @sta_id: station id
+ * @common: common/IPv4 configuration
+ * @num_valid_ipv6_addrs: number of valid IPv6 addresses
+ * @targ_addrs: target IPv6 addresses
+ * @ns_config: NS offload configurations
+ */
+struct iwl_proto_offload_cmd_v4 {
+       __le32 sta_id;
+       struct iwl_proto_offload_cmd_common common;
+       __le32 num_valid_ipv6_addrs;
+       struct iwl_targ_addr targ_addrs[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3L];
+       struct iwl_ns_config ns_config[IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3L];
+} __packed; /* PROT_OFFLOAD_CONFIG_CMD_DB_S_VER_4 */
+
 /*
  * WOWLAN_PATTERNS
  */
@@ -302,13 +318,23 @@ struct iwl_wowlan_patterns_cmd {
        /**
         * @n_patterns: number of patterns
         */
-       __le32 n_patterns;
+       u8 n_patterns;
+
+       /**
+        * @sta_id: station id
+        */
+       u8 sta_id;
+
+       /**
+        * @reserved: reserved for alignment
+        */
+       __le16 reserved;
 
        /**
         * @patterns: the patterns, array length in @n_patterns
         */
        struct iwl_wowlan_pattern_v2 patterns[];
-} __packed; /* WOWLAN_PATTERN_ARRAY_API_S_VER_2 */
+} __packed; /* WOWLAN_PATTERN_ARRAY_API_S_VER_3 */
 
 enum iwl_wowlan_wakeup_filters {
        IWL_WOWLAN_WAKEUP_MAGIC_PACKET                  = BIT(0),
@@ -339,9 +365,10 @@ enum iwl_wowlan_flags {
 };
 
 /**
- * struct iwl_wowlan_config_cmd - WoWLAN configuration
+ * struct iwl_wowlan_config_cmd - WoWLAN configuration (versions 5 and 6)
  * @wakeup_filter: filter from &enum iwl_wowlan_wakeup_filters
- * @non_qos_seq: non-QoS sequence counter to use next
+ * @non_qos_seq: non-QoS sequence counter to use next.
+ *               Reserved if the struct has version >= 6.
  * @qos_seq: QoS sequence counters to use next
  * @wowlan_ba_teardown_tids: bitmap of BA sessions to tear down
  * @is_11n_connection: indicates HT connection
@@ -456,6 +483,23 @@ struct iwl_wowlan_kek_kck_material_cmd_v3 {
        __le32  bigtk_cipher;
 } __packed; /* KEK_KCK_MATERIAL_API_S_VER_3 */
 
+struct iwl_wowlan_kek_kck_material_cmd_v4 {
+       __le32  sta_id;
+       u8      kck[IWL_KCK_MAX_SIZE];
+       u8      kek[IWL_KEK_MAX_SIZE];
+       __le16  kck_len;
+       __le16  kek_len;
+       __le64  replay_ctr;
+       __le32  akm;
+       __le32  gtk_cipher;
+       __le32  igtk_cipher;
+       __le32  bigtk_cipher;
+} __packed; /* KEK_KCK_MATERIAL_API_S_VER_4 */
+
+struct iwl_wowlan_get_status_cmd {
+       __le32  sta_id;
+} __packed; /* WOWLAN_GET_STATUSES_CMD_API_S_VER_1 */
+
 #define RF_KILL_INDICATOR_FOR_WOWLAN   0x87
 
 enum iwl_wowlan_rekey_status {
@@ -604,12 +648,13 @@ struct iwl_wowlan_status_v7 {
 } __packed; /* WOWLAN_STATUSES_API_S_VER_7 */
 
 /**
- * struct iwl_wowlan_status_v9 - WoWLAN status (version 9)
+ * struct iwl_wowlan_status_v9 - WoWLAN status (versions 9 and 10)
  * @gtk: GTK data
  * @igtk: IGTK data
  * @replay_ctr: GTK rekey replay counter
  * @pattern_number: number of the matched pattern
- * @non_qos_seq_ctr: non-QoS sequence counter to use next
+ * @non_qos_seq_ctr: non-QoS sequence counter to use next.
+ *                   Reserved if the struct has version >= 10.
  * @qos_seq_ctr: QoS sequence counters to use next
  * @wakeup_reasons: wakeup reasons, see &enum iwl_wowlan_wakeup_reason
  * @num_of_gtk_rekeys: number of GTK rekeys
@@ -638,7 +683,7 @@ struct iwl_wowlan_status_v9 {
        u8 tid_tear_down;
        u8 reserved[3];
        u8 wake_packet[]; /* can be truncated from _length to _bufsize */
-} __packed; /* WOWLAN_STATUSES_API_S_VER_9 */
+} __packed; /* WOWLAN_STATUSES_RSP_API_S_VER_9 */
 
 /**
  * struct iwl_wowlan_status - WoWLAN status
@@ -683,55 +728,6 @@ static inline u8 iwlmvm_wowlan_gtk_idx(struct iwl_wowlan_gtk_status *gtk)
        return gtk->key_flags & IWL_WOWLAN_GTK_IDX_MASK;
 }
 
-#define IWL_WOWLAN_TCP_MAX_PACKET_LEN          64
-#define IWL_WOWLAN_REMOTE_WAKE_MAX_PACKET_LEN  128
-#define IWL_WOWLAN_REMOTE_WAKE_MAX_TOKENS      2048
-
-struct iwl_tcp_packet_info {
-       __le16 tcp_pseudo_header_checksum;
-       __le16 tcp_payload_length;
-} __packed; /* TCP_PACKET_INFO_API_S_VER_2 */
-
-struct iwl_tcp_packet {
-       struct iwl_tcp_packet_info info;
-       u8 rx_mask[IWL_WOWLAN_MAX_PATTERN_LEN / 8];
-       u8 data[IWL_WOWLAN_TCP_MAX_PACKET_LEN];
-} __packed; /* TCP_PROTOCOL_PACKET_API_S_VER_1 */
-
-struct iwl_remote_wake_packet {
-       struct iwl_tcp_packet_info info;
-       u8 rx_mask[IWL_WOWLAN_MAX_PATTERN_LEN / 8];
-       u8 data[IWL_WOWLAN_REMOTE_WAKE_MAX_PACKET_LEN];
-} __packed; /* TCP_PROTOCOL_PACKET_API_S_VER_1 */
-
-struct iwl_wowlan_remote_wake_config {
-       __le32 connection_max_time; /* unused */
-       /* TCP_PROTOCOL_CONFIG_API_S_VER_1 */
-       u8 max_syn_retries;
-       u8 max_data_retries;
-       u8 tcp_syn_ack_timeout;
-       u8 tcp_ack_timeout;
-
-       struct iwl_tcp_packet syn_tx;
-       struct iwl_tcp_packet synack_rx;
-       struct iwl_tcp_packet keepalive_ack_rx;
-       struct iwl_tcp_packet fin_tx;
-
-       struct iwl_remote_wake_packet keepalive_tx;
-       struct iwl_remote_wake_packet wake_rx;
-
-       /* REMOTE_WAKE_OFFSET_INFO_API_S_VER_1 */
-       u8 sequence_number_offset;
-       u8 sequence_number_length;
-       u8 token_offset;
-       u8 token_length;
-       /* REMOTE_WAKE_PROTOCOL_PARAMS_API_S_VER_1 */
-       __le32 initial_sequence_number;
-       __le16 keepalive_interval;
-       __le16 num_tokens;
-       u8 tokens[IWL_WOWLAN_REMOTE_WAKE_MAX_TOKENS];
-} __packed; /* REMOTE_WAKE_CONFIG_API_S_VER_2 */
-
 /* TODO: NetDetect API */
 
 #endif /* __iwl_fw_api_d3_h__ */
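The v3 patterns command above packs `u8 n_patterns`, `u8 sta_id` and a 16-bit reserved field into the 4 bytes previously occupied by the `__le32` count, so the flexible `patterns[]` array keeps its offset. A standalone sketch checking that invariant (mirror structs only, assuming the layouts shown in the hunk):

/* Mirror of the v2 -> v3 header change: u8 + u8 + 16-bit pad
 * occupies the same 4 bytes as the old 32-bit count. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct patterns_hdr_v2 {
        uint32_t n_patterns;
} __attribute__((packed));

struct patterns_hdr_v3 {
        uint8_t n_patterns;
        uint8_t sta_id;
        uint16_t reserved;      /* alignment pad */
} __attribute__((packed));

int main(void)
{
        static_assert(sizeof(struct patterns_hdr_v2) ==
                      sizeof(struct patterns_hdr_v3),
                      "flexible array offset must not move");
        printf("both headers are %zu bytes\n",
               sizeof(struct patterns_hdr_v3));
        return 0;
}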
index d299bba..985b0dc 100644 (file)
@@ -63,6 +63,12 @@ enum iwl_data_path_subcmd_ids {
         */
        RX_NO_DATA_NOTIF = 0xF5,
 
+       /**
+        * @THERMAL_DUAL_CHAIN_REQUEST: firmware request for SMPS mode,
+        *      &struct iwl_thermal_dual_chain_request
+        */
+       THERMAL_DUAL_CHAIN_REQUEST = 0xF6,
+
        /**
         * @TLC_MNG_UPDATE_NOTIF: &struct iwl_tlc_update_notif
         */
@@ -169,4 +175,24 @@ struct iwl_datapath_monitor_notif {
        u8 reserved[3];
 } __packed; /* MONITOR_NTF_API_S_VER_1 */
 
+/**
+ * enum iwl_thermal_dual_chain_req_events - firmware SMPS request event
+ * @THERMAL_DUAL_CHAIN_REQ_ENABLE: (re-)enable dual-chain operation
+ *     (subject to other constraints)
+ * @THERMAL_DUAL_CHAIN_REQ_DISABLE: disable dual-chain operation
+ *     (static SMPS)
+ */
+enum iwl_thermal_dual_chain_req_events {
+       THERMAL_DUAL_CHAIN_REQ_ENABLE,
+       THERMAL_DUAL_CHAIN_REQ_DISABLE,
+}; /* THERMAL_DUAL_CHAIN_DISABLE_STATE_API_E_VER_1 */
+
+/**
+ * struct iwl_thermal_dual_chain_request - SMPS request
+ * @event: the type of request, see &enum iwl_thermal_dual_chain_req_events
+ */
+struct iwl_thermal_dual_chain_request {
+       __le32 event;
+} __packed; /* THERMAL_DUAL_CHAIN_DISABLE_REQ_NTFY_API_S_VER_1 */
+
 #endif /* __iwl_fw_api_datapath_h__ */
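A consumer of the new `THERMAL_DUAL_CHAIN_REQUEST` notification would dispatch on the little-endian `event` word. A hedged userspace model of that dispatch (the handler bodies are invented; only the enum values and the one-word payload come from the hunk above):

/* Userspace model of handling the SMPS request: the firmware
 * sends a single little-endian u32, decoded with le32_to_cpu()
 * in the kernel (le32toh() here, glibc endian.h). */
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

enum { THERMAL_DUAL_CHAIN_REQ_ENABLE, THERMAL_DUAL_CHAIN_REQ_DISABLE };

struct thermal_dual_chain_request {
        uint32_t event;         /* little-endian on the wire */
} __attribute__((packed));

static void handle_request(const struct thermal_dual_chain_request *req)
{
        switch (le32toh(req->event)) {
        case THERMAL_DUAL_CHAIN_REQ_ENABLE:
                printf("re-enable dual-chain (subject to other constraints)\n");
                break;
        case THERMAL_DUAL_CHAIN_REQ_DISABLE:
                printf("force static SMPS (single chain)\n");
                break;
        default:
                printf("unknown event\n");
        }
}

int main(void)
{
        struct thermal_dual_chain_request req = { htole32(1) };

        handle_request(&req);   /* prints the static-SMPS branch */
        return 0;
}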
index 996d5cc..5a2d9a1 100644 (file)
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
 /*
- * Copyright (C) 2018-2020 Intel Corporation
+ * Copyright (C) 2018-2021 Intel Corporation
  */
 #ifndef __iwl_fw_dbg_tlv_h__
 #define __iwl_fw_dbg_tlv_h__
@@ -11,6 +11,7 @@
 #define IWL_FW_INI_MAX_NAME                    32
 #define IWL_FW_INI_MAX_CFG_NAME                        64
 #define IWL_FW_INI_DOMAIN_ALWAYS_ON            0
+#define IWL_FW_INI_REGION_V2_MASK              0x0000FFFF
 
 /**
  * struct iwl_fw_ini_hcmd
index dc8f277..cf48c6f 100644 (file)
@@ -452,6 +452,25 @@ struct iwl_lari_config_change_cmd_v3 {
        __le32 oem_11ax_allow_bitmap;
 } __packed; /* LARI_CHANGE_CONF_CMD_S_VER_3 */
 
+/**
+ * struct iwl_lari_config_change_cmd_v4 - change LARI configuration
+ * @config_bitmap: Bitmap of the config commands. Each bit will trigger a
+ *     different predefined FW config operation.
+ * @oem_uhb_allow_bitmap: Bitmap of UHB enabled MCC sets.
+ * @oem_11ax_allow_bitmap: Bitmap of 11ax allowed MCCs. There are two bits
+ *     per country, one to indicate whether to override and the other to
+ *     indicate the value to use.
+ * @oem_unii4_allow_bitmap: Bitmap of unii4 allowed MCCs. There are two bits
+ *     per country, one to indicate whether to override and the other to
+ *     indicate allow/disallow unii4 channels.
+ */
+struct iwl_lari_config_change_cmd_v4 {
+       __le32 config_bitmap;
+       __le32 oem_uhb_allow_bitmap;
+       __le32 oem_11ax_allow_bitmap;
+       __le32 oem_unii4_allow_bitmap;
+} __packed; /* LARI_CHANGE_CONF_CMD_S_VER_4 */
+
 /**
  * struct iwl_pnvm_init_complete_ntfy - PNVM initialization complete
  * @status: PNVM image loading status
index cc4e18c..df7c55e 100644 (file)
@@ -1933,6 +1933,13 @@ static u32 iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt, struct list_head *list,
        u32 num_of_ranges, i, size;
        void *range;
 
+       /*
+        * The higher part of the ID in version 2 is irrelevant for
+        * us, so mask it out.
+        */
+       if (le32_to_cpu(reg->hdr.version) == 2)
+               id &= IWL_FW_INI_REGION_V2_MASK;
+
        if (!ops->get_num_of_ranges || !ops->get_size || !ops->fill_mem_hdr ||
            !ops->fill_range)
                return 0;
@@ -1957,7 +1964,7 @@ static u32 iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt, struct list_head *list,
        num_of_ranges = ops->get_num_of_ranges(fwrt, reg_data);
 
        header = (void *)tlv->data;
-       header->region_id = reg->id;
+       header->region_id = cpu_to_le32(id);
        header->num_of_ranges = cpu_to_le32(num_of_ranges);
        header->name_len = cpu_to_le32(IWL_FW_INI_MAX_NAME);
        memcpy(header->name, reg->name, IWL_FW_INI_MAX_NAME);
@@ -2752,44 +2759,6 @@ void iwl_fw_dbg_stop_sync(struct iwl_fw_runtime *fwrt)
 }
 IWL_EXPORT_SYMBOL(iwl_fw_dbg_stop_sync);
 
-#define FSEQ_REG(x) { .addr = (x), .str = #x, }
-
-void iwl_fw_error_print_fseq_regs(struct iwl_fw_runtime *fwrt)
-{
-       struct iwl_trans *trans = fwrt->trans;
-       int i;
-       struct {
-               u32 addr;
-               const char *str;
-       } fseq_regs[] = {
-               FSEQ_REG(FSEQ_ERROR_CODE),
-               FSEQ_REG(FSEQ_TOP_INIT_VERSION),
-               FSEQ_REG(FSEQ_CNVIO_INIT_VERSION),
-               FSEQ_REG(FSEQ_OTP_VERSION),
-               FSEQ_REG(FSEQ_TOP_CONTENT_VERSION),
-               FSEQ_REG(FSEQ_ALIVE_TOKEN),
-               FSEQ_REG(FSEQ_CNVI_ID),
-               FSEQ_REG(FSEQ_CNVR_ID),
-               FSEQ_REG(CNVI_AUX_MISC_CHIP),
-               FSEQ_REG(CNVR_AUX_MISC_CHIP),
-               FSEQ_REG(CNVR_SCU_SD_REGS_SD_REG_DIG_DCDC_VTRIM),
-               FSEQ_REG(CNVR_SCU_SD_REGS_SD_REG_ACTIVE_VDIG_MIRROR),
-       };
-
-       if (!iwl_trans_grab_nic_access(trans))
-               return;
-
-       IWL_ERR(fwrt, "Fseq Registers:\n");
-
-       for (i = 0; i < ARRAY_SIZE(fseq_regs); i++)
-               IWL_ERR(fwrt, "0x%08X | %s\n",
-                       iwl_read_prph_no_grab(trans, fseq_regs[i].addr),
-                       fseq_regs[i].str);
-
-       iwl_trans_release_nic_access(trans);
-}
-IWL_EXPORT_SYMBOL(iwl_fw_error_print_fseq_regs);
-
 static int iwl_fw_dbg_suspend_resume_hcmd(struct iwl_trans *trans, bool suspend)
 {
        struct iwl_dbg_suspend_resume_cmd cmd = {
index 49fa2f5..c0e84ef 100644 (file)
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
 /*
- * Copyright (C) 2005-2014, 2018-2019 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2019, 2021 Intel Corporation
  * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
  * Copyright (C) 2015-2017 Intel Deutschland GmbH
  */
@@ -321,4 +321,6 @@ static inline void iwl_fwrt_update_fw_versions(struct iwl_fw_runtime *fwrt,
                fwrt->dump.fw_ver.umac_minor = le32_to_cpu(umac->umac_minor);
        }
 }
+
+void iwl_fwrt_dump_error_logs(struct iwl_fw_runtime *fwrt);
 #endif  /* __iwl_fw_dbg_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dump.c b/drivers/net/wireless/intel/iwlwifi/fw/dump.c
new file mode 100644 (file)
index 0000000..a184220
--- /dev/null
@@ -0,0 +1,418 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2013-2014 Intel Mobile Communications GmbH
+ * Copyright (C) 2015-2017 Intel Deutschland GmbH
+ */
+#include <linux/devcoredump.h>
+#include "iwl-drv.h"
+#include "runtime.h"
+#include "dbg.h"
+#include "debugfs.h"
+#include "iwl-io.h"
+#include "iwl-prph.h"
+#include "iwl-csr.h"
+
+/*
+ * Note: This structure is read from the device with IO accesses,
+ * and the reading already does the endian conversion. As it is
+ * read with u32-sized accesses, any members with a different size
+ * need to be ordered correctly though!
+ */
+struct iwl_error_event_table_v1 {
+       u32 valid;              /* (nonzero) valid, (0) log is empty */
+       u32 error_id;           /* type of error */
+       u32 pc;                 /* program counter */
+       u32 blink1;             /* branch link */
+       u32 blink2;             /* branch link */
+       u32 ilink1;             /* interrupt link */
+       u32 ilink2;             /* interrupt link */
+       u32 data1;              /* error-specific data */
+       u32 data2;              /* error-specific data */
+       u32 data3;              /* error-specific data */
+       u32 bcon_time;          /* beacon timer */
+       u32 tsf_low;            /* network timestamp function timer */
+       u32 tsf_hi;             /* network timestamp function timer */
+       u32 gp1;                /* GP1 timer register */
+       u32 gp2;                /* GP2 timer register */
+       u32 gp3;                /* GP3 timer register */
+       u32 ucode_ver;          /* uCode version */
+       u32 hw_ver;             /* HW Silicon version */
+       u32 brd_ver;            /* HW board version */
+       u32 log_pc;             /* log program counter */
+       u32 frame_ptr;          /* frame pointer */
+       u32 stack_ptr;          /* stack pointer */
+       u32 hcmd;               /* last host command header */
+       u32 isr0;               /* isr status register LMPM_NIC_ISR0:
+                                * rxtx_flag */
+       u32 isr1;               /* isr status register LMPM_NIC_ISR1:
+                                * host_flag */
+       u32 isr2;               /* isr status register LMPM_NIC_ISR2:
+                                * enc_flag */
+       u32 isr3;               /* isr status register LMPM_NIC_ISR3:
+                                * time_flag */
+       u32 isr4;               /* isr status register LMPM_NIC_ISR4:
+                                * wico interrupt */
+       u32 isr_pref;           /* isr status register LMPM_NIC_PREF_STAT */
+       u32 wait_event;         /* wait event() caller address */
+       u32 l2p_control;        /* L2pControlField */
+       u32 l2p_duration;       /* L2pDurationField */
+       u32 l2p_mhvalid;        /* L2pMhValidBits */
+       u32 l2p_addr_match;     /* L2pAddrMatchStat */
+       u32 lmpm_pmg_sel;       /* indicate which clocks are turned on
+                                * (LMPM_PMG_SEL) */
+       u32 u_timestamp;        /* date and time of the firmware
+                                * compilation */
+       u32 flow_handler;       /* FH read/write pointers, RX credit */
+} __packed /* LOG_ERROR_TABLE_API_S_VER_1 */;
+
+struct iwl_error_event_table {
+       u32 valid;              /* (nonzero) valid, (0) log is empty */
+       u32 error_id;           /* type of error */
+       u32 trm_hw_status0;     /* TRM HW status */
+       u32 trm_hw_status1;     /* TRM HW status */
+       u32 blink2;             /* branch link */
+       u32 ilink1;             /* interrupt link */
+       u32 ilink2;             /* interrupt link */
+       u32 data1;              /* error-specific data */
+       u32 data2;              /* error-specific data */
+       u32 data3;              /* error-specific data */
+       u32 bcon_time;          /* beacon timer */
+       u32 tsf_low;            /* network timestamp function timer */
+       u32 tsf_hi;             /* network timestamp function timer */
+       u32 gp1;                /* GP1 timer register */
+       u32 gp2;                /* GP2 timer register */
+       u32 fw_rev_type;        /* firmware revision type */
+       u32 major;              /* uCode version major */
+       u32 minor;              /* uCode version minor */
+       u32 hw_ver;             /* HW Silicon version */
+       u32 brd_ver;            /* HW board version */
+       u32 log_pc;             /* log program counter */
+       u32 frame_ptr;          /* frame pointer */
+       u32 stack_ptr;          /* stack pointer */
+       u32 hcmd;               /* last host command header */
+       u32 isr0;               /* isr status register LMPM_NIC_ISR0:
+                                * rxtx_flag */
+       u32 isr1;               /* isr status register LMPM_NIC_ISR1:
+                                * host_flag */
+       u32 isr2;               /* isr status register LMPM_NIC_ISR2:
+                                * enc_flag */
+       u32 isr3;               /* isr status register LMPM_NIC_ISR3:
+                                * time_flag */
+       u32 isr4;               /* isr status register LMPM_NIC_ISR4:
+                                * wico interrupt */
+       u32 last_cmd_id;        /* last HCMD id handled by the firmware */
+       u32 wait_event;         /* wait event() caller address */
+       u32 l2p_control;        /* L2pControlField */
+       u32 l2p_duration;       /* L2pDurationField */
+       u32 l2p_mhvalid;        /* L2pMhValidBits */
+       u32 l2p_addr_match;     /* L2pAddrMatchStat */
+       u32 lmpm_pmg_sel;       /* indicate which clocks are turned on
+                                * (LMPM_PMG_SEL) */
+       u32 u_timestamp;        /* date and time of the firmware
+                                * compilation */
+       u32 flow_handler;       /* FH read/write pointers, RX credit */
+} __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
+
+/*
+ * UMAC error struct - relevant starting from family 8000 chip.
+ * Note: This structure is read from the device with IO accesses,
+ * and the reading already does the endian conversion. As it is
+ * read with u32-sized accesses, any members with a different size
+ * need to be ordered correctly though!
+ */
+struct iwl_umac_error_event_table {
+       u32 valid;              /* (nonzero) valid, (0) log is empty */
+       u32 error_id;           /* type of error */
+       u32 blink1;             /* branch link */
+       u32 blink2;             /* branch link */
+       u32 ilink1;             /* interrupt link */
+       u32 ilink2;             /* interrupt link */
+       u32 data1;              /* error-specific data */
+       u32 data2;              /* error-specific data */
+       u32 data3;              /* error-specific data */
+       u32 umac_major;
+       u32 umac_minor;
+       u32 frame_pointer;      /* core register 27*/
+       u32 stack_pointer;      /* core register 28 */
+       u32 cmd_header;         /* latest host cmd sent to UMAC */
+       u32 nic_isr_pref;       /* ISR status register */
+} __packed;
+
+#define ERROR_START_OFFSET  (1 * sizeof(u32))
+#define ERROR_ELEM_SIZE     (7 * sizeof(u32))
+
+static void iwl_fwrt_dump_umac_error_log(struct iwl_fw_runtime *fwrt)
+{
+       struct iwl_trans *trans = fwrt->trans;
+       struct iwl_umac_error_event_table table = {};
+       u32 base = fwrt->trans->dbg.umac_error_event_table;
+
+       if (!base &&
+           !(fwrt->trans->dbg.error_event_table_tlv_status &
+             IWL_ERROR_EVENT_TABLE_UMAC))
+               return;
+
+       iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));
+
+       if (table.valid)
+               fwrt->dump.umac_err_id = table.error_id;
+
+       if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
+               IWL_ERR(trans, "Start IWL Error Log Dump:\n");
+               IWL_ERR(trans, "Transport status: 0x%08lX, valid: %d\n",
+                       fwrt->trans->status, table.valid);
+       }
+
+       IWL_ERR(fwrt, "0x%08X | %s\n", table.error_id,
+               iwl_fw_lookup_assert_desc(table.error_id));
+       IWL_ERR(fwrt, "0x%08X | umac branchlink1\n", table.blink1);
+       IWL_ERR(fwrt, "0x%08X | umac branchlink2\n", table.blink2);
+       IWL_ERR(fwrt, "0x%08X | umac interruptlink1\n", table.ilink1);
+       IWL_ERR(fwrt, "0x%08X | umac interruptlink2\n", table.ilink2);
+       IWL_ERR(fwrt, "0x%08X | umac data1\n", table.data1);
+       IWL_ERR(fwrt, "0x%08X | umac data2\n", table.data2);
+       IWL_ERR(fwrt, "0x%08X | umac data3\n", table.data3);
+       IWL_ERR(fwrt, "0x%08X | umac major\n", table.umac_major);
+       IWL_ERR(fwrt, "0x%08X | umac minor\n", table.umac_minor);
+       IWL_ERR(fwrt, "0x%08X | frame pointer\n", table.frame_pointer);
+       IWL_ERR(fwrt, "0x%08X | stack pointer\n", table.stack_pointer);
+       IWL_ERR(fwrt, "0x%08X | last host cmd\n", table.cmd_header);
+       IWL_ERR(fwrt, "0x%08X | isr status reg\n", table.nic_isr_pref);
+}
+
+static void iwl_fwrt_dump_lmac_error_log(struct iwl_fw_runtime *fwrt, u8 lmac_num)
+{
+       struct iwl_trans *trans = fwrt->trans;
+       struct iwl_error_event_table table = {};
+       u32 val, base = fwrt->trans->dbg.lmac_error_event_table[lmac_num];
+
+       if (fwrt->cur_fw_img == IWL_UCODE_INIT) {
+               if (!base)
+                       base = fwrt->fw->init_errlog_ptr;
+       } else {
+               if (!base)
+                       base = fwrt->fw->inst_errlog_ptr;
+       }
+
+       if (base < 0x400000) {
+               IWL_ERR(fwrt,
+                       "Not valid error log pointer 0x%08X for %s uCode\n",
+                       base,
+                       (fwrt->cur_fw_img == IWL_UCODE_INIT)
+                       ? "Init" : "RT");
+               return;
+       }
+
+       /* check if there is a HW error */
+       val = iwl_trans_read_mem32(trans, base);
+       if (((val & ~0xf) == 0xa5a5a5a0) || ((val & ~0xf) == 0x5a5a5a50)) {
+               int err;
+
+               IWL_ERR(trans, "HW error, resetting before reading\n");
+
+               /* reset the device */
+               iwl_trans_sw_reset(trans);
+
+               err = iwl_finish_nic_init(trans, trans->trans_cfg);
+               if (err)
+                       return;
+       }
+
+       iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));
+
+       if (table.valid)
+               fwrt->dump.lmac_err_id[lmac_num] = table.error_id;
+
+       if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
+               IWL_ERR(trans, "Start IWL Error Log Dump:\n");
+               IWL_ERR(trans, "Transport status: 0x%08lX, valid: %d\n",
+                       fwrt->trans->status, table.valid);
+       }
+
+       /* Do not change this output - scripts rely on it */
+
+       IWL_ERR(fwrt, "Loaded firmware version: %s\n", fwrt->fw->fw_version);
+
+       IWL_ERR(fwrt, "0x%08X | %-28s\n", table.error_id,
+               iwl_fw_lookup_assert_desc(table.error_id));
+       IWL_ERR(fwrt, "0x%08X | trm_hw_status0\n", table.trm_hw_status0);
+       IWL_ERR(fwrt, "0x%08X | trm_hw_status1\n", table.trm_hw_status1);
+       IWL_ERR(fwrt, "0x%08X | branchlink2\n", table.blink2);
+       IWL_ERR(fwrt, "0x%08X | interruptlink1\n", table.ilink1);
+       IWL_ERR(fwrt, "0x%08X | interruptlink2\n", table.ilink2);
+       IWL_ERR(fwrt, "0x%08X | data1\n", table.data1);
+       IWL_ERR(fwrt, "0x%08X | data2\n", table.data2);
+       IWL_ERR(fwrt, "0x%08X | data3\n", table.data3);
+       IWL_ERR(fwrt, "0x%08X | beacon time\n", table.bcon_time);
+       IWL_ERR(fwrt, "0x%08X | tsf low\n", table.tsf_low);
+       IWL_ERR(fwrt, "0x%08X | tsf hi\n", table.tsf_hi);
+       IWL_ERR(fwrt, "0x%08X | time gp1\n", table.gp1);
+       IWL_ERR(fwrt, "0x%08X | time gp2\n", table.gp2);
+       IWL_ERR(fwrt, "0x%08X | uCode revision type\n", table.fw_rev_type);
+       IWL_ERR(fwrt, "0x%08X | uCode version major\n", table.major);
+       IWL_ERR(fwrt, "0x%08X | uCode version minor\n", table.minor);
+       IWL_ERR(fwrt, "0x%08X | hw version\n", table.hw_ver);
+       IWL_ERR(fwrt, "0x%08X | board version\n", table.brd_ver);
+       IWL_ERR(fwrt, "0x%08X | hcmd\n", table.hcmd);
+       IWL_ERR(fwrt, "0x%08X | isr0\n", table.isr0);
+       IWL_ERR(fwrt, "0x%08X | isr1\n", table.isr1);
+       IWL_ERR(fwrt, "0x%08X | isr2\n", table.isr2);
+       IWL_ERR(fwrt, "0x%08X | isr3\n", table.isr3);
+       IWL_ERR(fwrt, "0x%08X | isr4\n", table.isr4);
+       IWL_ERR(fwrt, "0x%08X | last cmd Id\n", table.last_cmd_id);
+       IWL_ERR(fwrt, "0x%08X | wait_event\n", table.wait_event);
+       IWL_ERR(fwrt, "0x%08X | l2p_control\n", table.l2p_control);
+       IWL_ERR(fwrt, "0x%08X | l2p_duration\n", table.l2p_duration);
+       IWL_ERR(fwrt, "0x%08X | l2p_mhvalid\n", table.l2p_mhvalid);
+       IWL_ERR(fwrt, "0x%08X | l2p_addr_match\n", table.l2p_addr_match);
+       IWL_ERR(fwrt, "0x%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
+       IWL_ERR(fwrt, "0x%08X | timestamp\n", table.u_timestamp);
+       IWL_ERR(fwrt, "0x%08X | flow_handler\n", table.flow_handler);
+}
+
+/*
+ * TCM error struct.
+ * Note: This structure is read from the device with IO accesses,
+ * and the reading already does the endian conversion. As it is
+ * read with u32-sized accesses, any members with a different size
+ * need to be ordered correctly though!
+ */
+struct iwl_tcm_error_event_table {
+       u32 valid;
+       u32 error_id;
+       u32 blink2;
+       u32 ilink1;
+       u32 ilink2;
+       u32 data1, data2, data3;
+       u32 logpc;
+       u32 frame_pointer;
+       u32 stack_pointer;
+       u32 msgid;
+       u32 isr;
+       u32 hw_status[5];
+       u32 sw_status[1];
+       u32 reserved[4];
+} __packed; /* TCM_LOG_ERROR_TABLE_API_S_VER_1 */
+
+static void iwl_fwrt_dump_tcm_error_log(struct iwl_fw_runtime *fwrt)
+{
+       struct iwl_trans *trans = fwrt->trans;
+       struct iwl_tcm_error_event_table table = {};
+       u32 base = fwrt->trans->dbg.tcm_error_event_table;
+       int i;
+
+       if (!base ||
+           !(fwrt->trans->dbg.error_event_table_tlv_status &
+             IWL_ERROR_EVENT_TABLE_TCM))
+               return;
+
+       iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));
+
+       IWL_ERR(fwrt, "TCM status:\n");
+       IWL_ERR(fwrt, "0x%08X | error ID\n", table.error_id);
+       IWL_ERR(fwrt, "0x%08X | tcm branchlink2\n", table.blink2);
+       IWL_ERR(fwrt, "0x%08X | tcm interruptlink1\n", table.ilink1);
+       IWL_ERR(fwrt, "0x%08X | tcm interruptlink2\n", table.ilink2);
+       IWL_ERR(fwrt, "0x%08X | tcm data1\n", table.data1);
+       IWL_ERR(fwrt, "0x%08X | tcm data2\n", table.data2);
+       IWL_ERR(fwrt, "0x%08X | tcm data3\n", table.data3);
+       IWL_ERR(fwrt, "0x%08X | tcm log PC\n", table.logpc);
+       IWL_ERR(fwrt, "0x%08X | tcm frame pointer\n", table.frame_pointer);
+       IWL_ERR(fwrt, "0x%08X | tcm stack pointer\n", table.stack_pointer);
+       IWL_ERR(fwrt, "0x%08X | tcm msg ID\n", table.msgid);
+       IWL_ERR(fwrt, "0x%08X | tcm ISR status\n", table.isr);
+       for (i = 0; i < ARRAY_SIZE(table.hw_status); i++)
+               IWL_ERR(fwrt, "0x%08X | tcm HW status[%d]\n",
+                       table.hw_status[i], i);
+       for (i = 0; i < ARRAY_SIZE(table.sw_status); i++)
+               IWL_ERR(fwrt, "0x%08X | tcm SW status[%d]\n",
+                       table.sw_status[i], i);
+}
+
+static void iwl_fwrt_dump_iml_error_log(struct iwl_fw_runtime *fwrt)
+{
+       struct iwl_trans *trans = fwrt->trans;
+       u32 error, data1;
+
+       if (fwrt->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000) {
+               error = UMAG_SB_CPU_2_STATUS;
+               data1 = UMAG_SB_CPU_1_STATUS;
+       } else if (fwrt->trans->trans_cfg->device_family >=
+                  IWL_DEVICE_FAMILY_8000) {
+               error = SB_CPU_2_STATUS;
+               data1 = SB_CPU_1_STATUS;
+       } else {
+               return;
+       }
+
+       /* read whichever per-family status register was selected above */
+       error = iwl_read_umac_prph(trans, error);
+
+       IWL_ERR(trans, "IML/ROM dump:\n");
+
+       if (error & 0xFFFF0000)
+               IWL_ERR(trans, "0x%04X | IML/ROM SYSASSERT\n", error >> 16);
+
+       IWL_ERR(fwrt, "0x%08X | IML/ROM error/state\n", error);
+       IWL_ERR(fwrt, "0x%08X | IML/ROM data1\n",
+               iwl_read_umac_prph(trans, data1));
+
+       if (fwrt->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000)
+               IWL_ERR(fwrt, "0x%08X | IML/ROM WFPM_AUTH_KEY_0\n",
+                       iwl_read_umac_prph(trans, SB_MODIFY_CFG_FLAG));
+}
+
+#define FSEQ_REG(x) { .addr = (x), .str = #x, }
+
+static void iwl_fwrt_dump_fseq_regs(struct iwl_fw_runtime *fwrt)
+{
+       struct iwl_trans *trans = fwrt->trans;
+       int i;
+       struct {
+               u32 addr;
+               const char *str;
+       } fseq_regs[] = {
+               FSEQ_REG(FSEQ_ERROR_CODE),
+               FSEQ_REG(FSEQ_TOP_INIT_VERSION),
+               FSEQ_REG(FSEQ_CNVIO_INIT_VERSION),
+               FSEQ_REG(FSEQ_OTP_VERSION),
+               FSEQ_REG(FSEQ_TOP_CONTENT_VERSION),
+               FSEQ_REG(FSEQ_ALIVE_TOKEN),
+               FSEQ_REG(FSEQ_CNVI_ID),
+               FSEQ_REG(FSEQ_CNVR_ID),
+               FSEQ_REG(CNVI_AUX_MISC_CHIP),
+               FSEQ_REG(CNVR_AUX_MISC_CHIP),
+               FSEQ_REG(CNVR_SCU_SD_REGS_SD_REG_DIG_DCDC_VTRIM),
+               FSEQ_REG(CNVR_SCU_SD_REGS_SD_REG_ACTIVE_VDIG_MIRROR),
+       };
+
+       if (!iwl_trans_grab_nic_access(trans))
+               return;
+
+       IWL_ERR(fwrt, "Fseq Registers:\n");
+
+       for (i = 0; i < ARRAY_SIZE(fseq_regs); i++)
+               IWL_ERR(fwrt, "0x%08X | %s\n",
+                       iwl_read_prph_no_grab(trans, fseq_regs[i].addr),
+                       fseq_regs[i].str);
+
+       iwl_trans_release_nic_access(trans);
+}
+
+void iwl_fwrt_dump_error_logs(struct iwl_fw_runtime *fwrt)
+{
+       if (!test_bit(STATUS_DEVICE_ENABLED, &fwrt->trans->status)) {
+               IWL_ERR(fwrt,
+                       "DEVICE_ENABLED bit is not set. Aborting dump.\n");
+               return;
+       }
+
+       iwl_fwrt_dump_lmac_error_log(fwrt, 0);
+       if (fwrt->trans->dbg.lmac_error_event_table[1])
+               iwl_fwrt_dump_lmac_error_log(fwrt, 1);
+       iwl_fwrt_dump_umac_error_log(fwrt);
+       iwl_fwrt_dump_tcm_error_log(fwrt);
+       iwl_fwrt_dump_iml_error_log(fwrt);
+       iwl_fwrt_dump_fseq_regs(fwrt);
+}
+IWL_EXPORT_SYMBOL(iwl_fwrt_dump_error_logs);
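As the comments in dump.c stress, these tables are read from the device with u32-sized accesses, so every layout must stay 4-byte granular. A small standalone check of that invariant against a mirror of the TCM table (mirror struct only, not the driver's definition):

/* The tables above are pulled from the device with u32-sized
 * reads; assert that a mirror of iwl_tcm_error_event_table is a
 * whole number of 32-bit words. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct tcm_error_event_table {
        uint32_t valid;
        uint32_t error_id;
        uint32_t blink2;
        uint32_t ilink1;
        uint32_t ilink2;
        uint32_t data1, data2, data3;
        uint32_t logpc;
        uint32_t frame_pointer;
        uint32_t stack_pointer;
        uint32_t msgid;
        uint32_t isr;
        uint32_t hw_status[5];
        uint32_t sw_status[1];
        uint32_t reserved[4];
} __attribute__((packed));

int main(void)
{
        static_assert(sizeof(struct tcm_error_event_table) %
                      sizeof(uint32_t) == 0,
                      "table must be readable with u32 accesses");
        printf("TCM table: %zu bytes (%zu words)\n",
               sizeof(struct tcm_error_event_table),
               sizeof(struct tcm_error_event_table) / 4);
        return 0;
}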
index f9c5cf5..9a8c7b7 100644 (file)
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
 /*
- * Copyright (C) 2008-2014, 2018-2020 Intel Corporation
+ * Copyright (C) 2008-2014, 2018-2021 Intel Corporation
  * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
  * Copyright (C) 2016-2017 Intel Deutschland GmbH
  */
@@ -52,7 +52,8 @@ enum iwl_ucode_tlv_type {
        IWL_UCODE_TLV_INIT_DATA         = 4,
        IWL_UCODE_TLV_BOOT              = 5,
        IWL_UCODE_TLV_PROBE_MAX_LEN     = 6, /* a u32 value */
-       IWL_UCODE_TLV_PAN               = 7,
+       IWL_UCODE_TLV_PAN               = 7, /* deprecated -- only used in DVM */
+       IWL_UCODE_TLV_MEM_DESC          = 7, /* replaces PAN in non-DVM */
        IWL_UCODE_TLV_RUNT_EVTLOG_PTR   = 8,
        IWL_UCODE_TLV_RUNT_EVTLOG_SIZE  = 9,
        IWL_UCODE_TLV_RUNT_ERRLOG_PTR   = 10,
@@ -97,6 +98,7 @@ enum iwl_ucode_tlv_type {
 
        IWL_UCODE_TLV_PNVM_VERSION              = 62,
        IWL_UCODE_TLV_PNVM_SKU                  = 64,
+       IWL_UCODE_TLV_TCM_DEBUG_ADDRS           = 65,
 
        IWL_UCODE_TLV_FW_NUM_STATIONS           = IWL_UCODE_TLV_CONST_BASE + 0,
 
@@ -277,10 +279,11 @@ enum iwl_ucode_tlv_api {
        IWL_UCODE_TLV_API_BAND_IN_RX_DATA       = (__force iwl_ucode_tlv_api_t)59,
 
 
-       NUM_IWL_UCODE_TLV_API
 #ifdef __CHECKER__
-               /* sparse says it cannot increment the previous enum member */
-               = 128
+       /* sparse says it cannot increment the previous enum member */
+#define NUM_IWL_UCODE_TLV_API 128
+#else
+       NUM_IWL_UCODE_TLV_API
 #endif
 };
 
@@ -411,6 +414,7 @@ enum iwl_ucode_tlv_capa {
        IWL_UCODE_TLV_CAPA_PROTECTED_TWT                = (__force iwl_ucode_tlv_capa_t)56,
        IWL_UCODE_TLV_CAPA_FW_RESET_HANDSHAKE           = (__force iwl_ucode_tlv_capa_t)57,
        IWL_UCODE_TLV_CAPA_PASSIVE_6GHZ_SCAN            = (__force iwl_ucode_tlv_capa_t)58,
+       IWL_UCODE_TLV_CAPA_BROADCAST_TWT                = (__force iwl_ucode_tlv_capa_t)60,
 
        /* set 2 */
        IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE         = (__force iwl_ucode_tlv_capa_t)64,
@@ -446,10 +450,11 @@ enum iwl_ucode_tlv_capa {
        IWL_UCODE_TLV_CAPA_BIGTK_SUPPORT                = (__force iwl_ucode_tlv_capa_t)100,
        IWL_UCODE_TLV_CAPA_RFIM_SUPPORT                 = (__force iwl_ucode_tlv_capa_t)102,
 
-       NUM_IWL_UCODE_TLV_CAPA
 #ifdef __CHECKER__
-               /* sparse says it cannot increment the previous enum member */
-               = 128
+       /* sparse says it cannot increment the previous enum member */
+#define NUM_IWL_UCODE_TLV_CAPA 128
+#else
+       NUM_IWL_UCODE_TLV_CAPA
 #endif
 };
 
@@ -946,6 +951,10 @@ struct iwl_fw_cmd_version {
        u8 notif_ver;
 } __packed;
 
+struct iwl_fw_tcm_error_addr {
+       __le32 addr;
+}; /* FW_TLV_TCM_ERROR_INFO_ADDRS_S */
+
 static inline size_t _iwl_tlv_array_len(const struct iwl_ucode_tlv *tlv,
                                        size_t fixed_size, size_t var_size)
 {
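The `__CHECKER__` churn above reworks how `NUM_IWL_UCODE_TLV_API` and `NUM_IWL_UCODE_TLV_CAPA` are defined: under sparse the count becomes a plain macro pinned to the API width, since sparse refuses to auto-increment past a `(__force ...)` member, while ordinary builds keep it as the final enum member. A compressed model of the pattern:

/* Model of the sparse workaround: when the checker runs, the
 * count is a macro fixed at the API width; otherwise the enum
 * supplies it by incrementing the last real member. */
#include <stdio.h>

enum api_flags {
        FLAG_A = 0,
        FLAG_B = 59,

#ifdef __CHECKER__
        /* sparse says it cannot increment the previous enum member */
#define NUM_API_FLAGS 128
#else
        NUM_API_FLAGS
#endif
};

int main(void)
{
        printf("NUM_API_FLAGS = %d\n", (int)NUM_API_FLAGS); /* 60 here */
        return 0;
}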
index 40f2109..2403490 100644 (file)
@@ -10,7 +10,7 @@
 #include "fw/api/commands.h"
 #include "fw/api/nvm-reg.h"
 #include "fw/api/alive.h"
-#include <linux/efi.h>
+#include "fw/uefi.h"
 
 struct iwl_pnvm_section {
        __le32 offset;
@@ -220,83 +220,6 @@ static int iwl_pnvm_parse(struct iwl_trans *trans, const u8 *data,
        return -ENOENT;
 }
 
-#if defined(CONFIG_EFI)
-
-#define IWL_EFI_VAR_GUID EFI_GUID(0x92daaf2f, 0xc02b, 0x455b,  \
-                                 0xb2, 0xec, 0xf5, 0xa3,       \
-                                 0x59, 0x4f, 0x4a, 0xea)
-
-#define IWL_UEFI_OEM_PNVM_NAME L"UefiCnvWlanOemSignedPnvm"
-
-#define IWL_HARDCODED_PNVM_SIZE 4096
-
-struct pnvm_sku_package {
-       u8 rev;
-       u8 reserved1[3];
-       u32 total_size;
-       u8 n_skus;
-       u8 reserved2[11];
-       u8 data[];
-};
-
-static int iwl_pnvm_get_from_efi(struct iwl_trans *trans,
-                                u8 **data, size_t *len)
-{
-       struct efivar_entry *pnvm_efivar;
-       struct pnvm_sku_package *package;
-       unsigned long package_size;
-       int err;
-
-       pnvm_efivar = kzalloc(sizeof(*pnvm_efivar), GFP_KERNEL);
-       if (!pnvm_efivar)
-               return -ENOMEM;
-
-       memcpy(&pnvm_efivar->var.VariableName, IWL_UEFI_OEM_PNVM_NAME,
-              sizeof(IWL_UEFI_OEM_PNVM_NAME));
-       pnvm_efivar->var.VendorGuid = IWL_EFI_VAR_GUID;
-
-       /*
-        * TODO: we hardcode a maximum length here, because reading
-        * from the UEFI is not working.  To implement this properly,
-        * we have to call efivar_entry_size().
-        */
-       package_size = IWL_HARDCODED_PNVM_SIZE;
-
-       package = kmalloc(package_size, GFP_KERNEL);
-       if (!package) {
-               err = -ENOMEM;
-               goto out;
-       }
-
-       err = efivar_entry_get(pnvm_efivar, NULL, &package_size, package);
-       if (err) {
-               IWL_DEBUG_FW(trans,
-                            "PNVM UEFI variable not found %d (len %lu)\n",
-                            err, package_size);
-               goto out;
-       }
-
-       IWL_DEBUG_FW(trans, "Read PNVM fro UEFI with size %lu\n", package_size);
-
-       *data = kmemdup(package->data, *len, GFP_KERNEL);
-       if (!*data)
-               err = -ENOMEM;
-       *len = package_size - sizeof(*package);
-
-out:
-       kfree(package);
-       kfree(pnvm_efivar);
-
-       return err;
-}
-#else /* CONFIG_EFI */
-static inline int iwl_pnvm_get_from_efi(struct iwl_trans *trans,
-                                       u8 **data, size_t *len)
-{
-       return -EOPNOTSUPP;
-}
-#endif /* CONFIG_EFI */
-
 static int iwl_pnvm_get_from_fs(struct iwl_trans *trans, u8 **data, size_t *len)
 {
        const struct firmware *pnvm;
@@ -335,6 +258,7 @@ int iwl_pnvm_load(struct iwl_trans *trans,
 {
        u8 *data;
        size_t len;
+       struct pnvm_sku_package *package;
        struct iwl_notification_wait pnvm_wait;
        static const u16 ntf_cmds[] = { WIDE_ID(REGULATORY_AND_NVM_GROUP,
                                                PNVM_INIT_COMPLETE_NTFY) };
@@ -356,9 +280,19 @@ int iwl_pnvm_load(struct iwl_trans *trans,
        }
 
        /* First attempt to get the PNVM from BIOS */
-       ret = iwl_pnvm_get_from_efi(trans, &data, &len);
-       if (!ret)
-               goto parse;
+       package = iwl_uefi_get_pnvm(trans, &len);
+       if (!IS_ERR_OR_NULL(package)) {
+               data = kmemdup(package->data, len, GFP_KERNEL);
+
+               /* free package regardless of whether kmemdup succeeded */
+               kfree(package);
+
+               if (data) {
+                       /* we need only the data size */
+                       len -= sizeof(*package);
+                       goto parse;
+               }
+       }
 
        /* If it's not available, try from the filesystem */
        ret = iwl_pnvm_get_from_fs(trans, &data, &len);
@@ -379,6 +313,30 @@ parse:
        kfree(data);
 
 skip_parse:
+       data = NULL;
+       /* now try to get the reduce power table, if not loaded yet */
+       if (!trans->reduce_power_loaded) {
+               data = iwl_uefi_get_reduced_power(trans, &len);
+               if (IS_ERR_OR_NULL(data)) {
+                       /*
+                        * Pretend we've loaded it - at least we've tried and
+                        * couldn't load it at all, so there's no point in
+                        * trying again over and over.
+                        */
+                       trans->reduce_power_loaded = true;
+
+                       goto skip_reduce_power;
+               }
+       }
+
+       ret = iwl_trans_set_reduce_power(trans, data, len);
+       if (ret)
+               IWL_DEBUG_FW(trans,
+                            "Failed to set reduce power table %d\n",
+                            ret);
+       kfree(data);
+
+skip_reduce_power:
        iwl_init_notification_wait(notif_wait, &pnvm_wait,
                                   ntf_cmds, ARRAY_SIZE(ntf_cmds),
                                   iwl_pnvm_complete_fn, trans);
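The reworked load path treats the `iwl_uefi_get_pnvm()` and `iwl_uefi_get_reduced_power()` returns as kernel `ERR_PTR()` values, so one pointer carries either the payload or the errno. A userspace model of that convention (`ERR_PTR`/`IS_ERR` re-implemented here only for illustration):

/* Userspace model of the ERR_PTR convention used above: errors
 * travel inside the pointer, so a single return value covers
 * both the payload and the failure code. */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_ERRNO       4095
#define ERR_PTR(err)    ((void *)(intptr_t)(err))
#define PTR_ERR(ptr)    ((long)(intptr_t)(ptr))
#define IS_ERR(ptr)     ((uintptr_t)(ptr) >= (uintptr_t)-MAX_ERRNO)
#define IS_ERR_OR_NULL(ptr)     (!(ptr) || IS_ERR(ptr))

static void *get_package(int available)
{
        static char payload[] = "pnvm-data";

        return available ? (void *)payload : ERR_PTR(-ENOENT);
}

int main(void)
{
        void *pkg = get_package(0);

        if (IS_ERR_OR_NULL(pkg))
                printf("no package: err %ld, falling back to filesystem\n",
                       IS_ERR(pkg) ? PTR_ERR(pkg) : 0L);
        else
                printf("got package: %s\n", (char *)pkg);
        return 0;
}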
index e4f91bc..61d3d4e 100644 (file)
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
 /******************************************************************************
  *
- * Copyright(c) 2020 Intel Corporation
+ * Copyright(c) 2020-2021 Intel Corporation
  *
  *****************************************************************************/
 
@@ -10,7 +10,7 @@
 
 #include "fw/notif-wait.h"
 
-#define MVM_UCODE_PNVM_TIMEOUT (HZ / 10)
+#define MVM_UCODE_PNVM_TIMEOUT (HZ / 4)
 
 int iwl_pnvm_load(struct iwl_trans *trans,
                  struct iwl_notif_wait_data *notif_wait);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/uefi.c b/drivers/net/wireless/intel/iwlwifi/fw/uefi.c
new file mode 100644 (file)
index 0000000..a7c79d8
--- /dev/null
@@ -0,0 +1,262 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright(c) 2021 Intel Corporation
+ */
+
+#include "iwl-drv.h"
+#include "pnvm.h"
+#include "iwl-prph.h"
+#include "iwl-io.h"
+
+#include "fw/uefi.h"
+#include "fw/api/alive.h"
+#include <linux/efi.h>
+
+#define IWL_EFI_VAR_GUID EFI_GUID(0x92daaf2f, 0xc02b, 0x455b,  \
+                                 0xb2, 0xec, 0xf5, 0xa3,       \
+                                 0x59, 0x4f, 0x4a, 0xea)
+
+void *iwl_uefi_get_pnvm(struct iwl_trans *trans, size_t *len)
+{
+       struct efivar_entry *pnvm_efivar;
+       void *data;
+       unsigned long package_size;
+       int err;
+
+       *len = 0;
+
+       pnvm_efivar = kzalloc(sizeof(*pnvm_efivar), GFP_KERNEL);
+       if (!pnvm_efivar)
+               return ERR_PTR(-ENOMEM);
+
+       memcpy(&pnvm_efivar->var.VariableName, IWL_UEFI_OEM_PNVM_NAME,
+              sizeof(IWL_UEFI_OEM_PNVM_NAME));
+       pnvm_efivar->var.VendorGuid = IWL_EFI_VAR_GUID;
+
+       /*
+        * TODO: we hardcode a maximum length here, because reading
+        * from the UEFI is not working.  To implement this properly,
+        * we have to call efivar_entry_size().
+        */
+       package_size = IWL_HARDCODED_PNVM_SIZE;
+
+       data = kmalloc(package_size, GFP_KERNEL);
+       if (!data) {
+               data = ERR_PTR(-ENOMEM);
+               goto out;
+       }
+
+       err = efivar_entry_get(pnvm_efivar, NULL, &package_size, data);
+       if (err) {
+               IWL_DEBUG_FW(trans,
+                            "PNVM UEFI variable not found %d (len %lu)\n",
+                            err, package_size);
+               kfree(data);
+               data = ERR_PTR(err);
+               goto out;
+       }
+
+       IWL_DEBUG_FW(trans, "Read PNVM from UEFI with size %lu\n", package_size);
+       *len = package_size;
+
+out:
+       kfree(pnvm_efivar);
+
+       return data;
+}
+
+static void *iwl_uefi_reduce_power_section(struct iwl_trans *trans,
+                                          const u8 *data, size_t len)
+{
+       struct iwl_ucode_tlv *tlv;
+       u8 *reduce_power_data = NULL, *tmp;
+       u32 size = 0;
+
+       IWL_DEBUG_FW(trans, "Handling REDUCE_POWER section\n");
+
+       while (len >= sizeof(*tlv)) {
+               u32 tlv_len, tlv_type;
+
+               len -= sizeof(*tlv);
+               tlv = (void *)data;
+
+               tlv_len = le32_to_cpu(tlv->length);
+               tlv_type = le32_to_cpu(tlv->type);
+
+               if (len < tlv_len) {
+                       IWL_ERR(trans, "invalid TLV len: %zd/%u\n",
+                               len, tlv_len);
+                       reduce_power_data = ERR_PTR(-EINVAL);
+                       goto out;
+               }
+
+               data += sizeof(*tlv);
+
+               switch (tlv_type) {
+               case IWL_UCODE_TLV_MEM_DESC: {
+                       IWL_DEBUG_FW(trans,
+                                    "Got IWL_UCODE_TLV_MEM_DESC len %d\n",
+                                    tlv_len);
+
+                       IWL_DEBUG_FW(trans, "Adding data (size %d)\n", tlv_len);
+
+                       tmp = krealloc(reduce_power_data, size + tlv_len, GFP_KERNEL);
+                       if (!tmp) {
+                               IWL_DEBUG_FW(trans,
+                                            "Couldn't allocate (more) reduce_power_data\n");
+
+                               reduce_power_data = ERR_PTR(-ENOMEM);
+                               goto out;
+                       }
+
+                       reduce_power_data = tmp;
+
+                       memcpy(reduce_power_data + size, data, tlv_len);
+
+                       size += tlv_len;
+
+                       break;
+               }
+               case IWL_UCODE_TLV_PNVM_SKU:
+                       IWL_DEBUG_FW(trans,
+                                    "New REDUCE_POWER section started, stop parsing.\n");
+                       goto done;
+               default:
+                       IWL_DEBUG_FW(trans, "Found TLV 0x%0x, len %d\n",
+                                    tlv_type, tlv_len);
+                       break;
+               }
+
+               len -= ALIGN(tlv_len, 4);
+               data += ALIGN(tlv_len, 4);
+       }
+
+done:
+       if (!size) {
+               IWL_DEBUG_FW(trans, "Empty REDUCE_POWER, skipping.\n");
+               reduce_power_data = ERR_PTR(-ENOENT);
+               goto out;
+       }
+
+       IWL_INFO(trans, "loaded REDUCE_POWER\n");
+
+out:
+       return reduce_power_data;
+}
+
+static void *iwl_uefi_reduce_power_parse(struct iwl_trans *trans,
+                                        const u8 *data, size_t len)
+{
+       struct iwl_ucode_tlv *tlv;
+       void *sec_data;
+
+       IWL_DEBUG_FW(trans, "Parsing REDUCE_POWER data\n");
+
+       while (len >= sizeof(*tlv)) {
+               u32 tlv_len, tlv_type;
+
+               len -= sizeof(*tlv);
+               tlv = (void *)data;
+
+               tlv_len = le32_to_cpu(tlv->length);
+               tlv_type = le32_to_cpu(tlv->type);
+
+               if (len < tlv_len) {
+                       IWL_ERR(trans, "invalid TLV len: %zd/%u\n",
+                               len, tlv_len);
+                       return ERR_PTR(-EINVAL);
+               }
+
+               if (tlv_type == IWL_UCODE_TLV_PNVM_SKU) {
+                       struct iwl_sku_id *sku_id =
+                               (void *)(data + sizeof(*tlv));
+
+                       IWL_DEBUG_FW(trans,
+                                    "Got IWL_UCODE_TLV_PNVM_SKU len %d\n",
+                                    tlv_len);
+                       IWL_DEBUG_FW(trans, "sku_id 0x%0x 0x%0x 0x%0x\n",
+                                    le32_to_cpu(sku_id->data[0]),
+                                    le32_to_cpu(sku_id->data[1]),
+                                    le32_to_cpu(sku_id->data[2]));
+
+                       data += sizeof(*tlv) + ALIGN(tlv_len, 4);
+                       len -= ALIGN(tlv_len, 4);
+
+                       if (trans->sku_id[0] == le32_to_cpu(sku_id->data[0]) &&
+                           trans->sku_id[1] == le32_to_cpu(sku_id->data[1]) &&
+                           trans->sku_id[2] == le32_to_cpu(sku_id->data[2])) {
+                               sec_data = iwl_uefi_reduce_power_section(trans,
+                                                                        data,
+                                                                        len);
+                               if (!IS_ERR(sec_data))
+                                       return sec_data;
+                       } else {
+                               IWL_DEBUG_FW(trans, "SKU ID didn't match!\n");
+                       }
+               } else {
+                       data += sizeof(*tlv) + ALIGN(tlv_len, 4);
+                       len -= ALIGN(tlv_len, 4);
+               }
+       }
+
+       return ERR_PTR(-ENOENT);
+}
+
+void *iwl_uefi_get_reduced_power(struct iwl_trans *trans, size_t *len)
+{
+       struct efivar_entry *reduce_power_efivar;
+       struct pnvm_sku_package *package;
+       void *data = NULL;
+       unsigned long package_size;
+       int err;
+
+       *len = 0;
+
+       reduce_power_efivar = kzalloc(sizeof(*reduce_power_efivar), GFP_KERNEL);
+       if (!reduce_power_efivar)
+               return ERR_PTR(-ENOMEM);
+
+       memcpy(&reduce_power_efivar->var.VariableName, IWL_UEFI_REDUCED_POWER_NAME,
+              sizeof(IWL_UEFI_REDUCED_POWER_NAME));
+       reduce_power_efivar->var.VendorGuid = IWL_EFI_VAR_GUID;
+
+       /*
+        * TODO: we hardcode a maximum length here, because reading
+        * from the UEFI is not working.  To implement this properly,
+        * we have to call efivar_entry_size().
+        */
+       package_size = IWL_HARDCODED_REDUCE_POWER_SIZE;
+
+       package = kmalloc(package_size, GFP_KERNEL);
+       if (!package) {
+               package = ERR_PTR(-ENOMEM);
+               goto out;
+       }
+
+       err = efivar_entry_get(reduce_power_efivar, NULL, &package_size, package);
+       if (err) {
+               IWL_DEBUG_FW(trans,
+                            "Reduced Power UEFI variable not found %d (len %lu)\n",
+                            err, package_size);
+               kfree(package);
+               data = ERR_PTR(err);
+               goto out;
+       }
+
+       IWL_DEBUG_FW(trans, "Read reduced power from UEFI with size %lu\n",
+                    package_size);
+       *len = package_size;
+
+       IWL_DEBUG_FW(trans, "rev %d, total_size %d, n_skus %d\n",
+                    package->rev, package->total_size, package->n_skus);
+
+       data = iwl_uefi_reduce_power_parse(trans, package->data,
+                                          *len - sizeof(*package));
+
+       kfree(package);
+
+out:
+       kfree(reduce_power_efivar);
+
+       return data;
+}
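
[note] Both TLV walkers above share the same shape: read a fixed little-endian header, bounds-check the payload against the remaining length, then advance by the 4-byte-aligned payload size. A minimal standalone sketch of that loop follows; tlv_hdr, align4 and the final alignment guard are illustrative names and additions, not the driver's (the driver relies on well-formed, 4-byte-padded streams instead of the last check):

#include <stdint.h>
#include <string.h>
#include <stddef.h>

struct tlv_hdr {                /* stands in for struct iwl_ucode_tlv */
        uint32_t type;          /* little-endian on the wire */
        uint32_t length;        /* payload bytes, excluding this header */
};

#define align4(x) (((x) + 3u) & ~3u)

static int tlv_walk(const uint8_t *data, size_t len)
{
        while (len >= sizeof(struct tlv_hdr)) {
                struct tlv_hdr tlv;

                memcpy(&tlv, data, sizeof(tlv));     /* no unaligned loads */
                data += sizeof(tlv);
                len -= sizeof(tlv);

                if (len < tlv.length)
                        return -1;                   /* truncated TLV */
                if (len < align4(tlv.length))
                        return -1;                   /* unpadded final TLV */

                /* dispatch on tlv.type here; payload is data[0..length) */

                data += align4(tlv.length);
                len -= align4(tlv.length);
        }
        return 0;
}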
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/uefi.h b/drivers/net/wireless/intel/iwlwifi/fw/uefi.h
new file mode 100644 (file)
index 0000000..45d0b36
--- /dev/null
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright(c) 2021 Intel Corporation
+ */
+
+
+#define IWL_UEFI_OEM_PNVM_NAME         L"UefiCnvWlanOemSignedPnvm"
+#define IWL_UEFI_REDUCED_POWER_NAME    L"UefiCnvWlanReducedPower"
+
+/*
+ * TODO: we have these hardcoded values that the caller must pass,
+ * because reading from the UEFI is not working.  To implement this
+ * properly, we have to change iwl_pnvm_get_from_uefi() to call
+ * efivar_entry_size() and return the value to the caller instead.
+ */
+#define IWL_HARDCODED_PNVM_SIZE                4096
+#define IWL_HARDCODED_REDUCE_POWER_SIZE        32768
+
+struct pnvm_sku_package {
+       u8 rev;
+       u32 total_size;
+       u8 n_skus;
+       u32 reserved[2];
+       u8 data[];
+} __packed;
+
+#ifdef CONFIG_EFI
+void *iwl_uefi_get_pnvm(struct iwl_trans *trans, size_t *len);
+void *iwl_uefi_get_reduced_power(struct iwl_trans *trans, size_t *len);
+#else /* CONFIG_EFI */
+static inline
+void *iwl_uefi_get_pnvm(struct iwl_trans *trans, size_t *len)
+{
+       return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline
+void *iwl_uefi_get_reduced_power(struct iwl_trans *trans, size_t *len)
+{
+       return ERR_PTR(-EOPNOTSUPP);
+}
+#endif /* CONFIG_EFI */
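
[note] Both getters return an ERR_PTR-encoded buffer rather than NULL on failure, so callers must use IS_ERR()/PTR_ERR() and must kfree() the buffer once consumed. A hedged sketch of the expected call pattern, assuming kernel context (linux/err.h, linux/slab.h); the function name is illustrative, the real caller sits in the PNVM load path:

static int load_reduce_power_table(struct iwl_trans *trans)
{
        size_t len;
        void *data = iwl_uefi_get_reduced_power(trans, &len);
        int ret;

        if (IS_ERR(data))
                return PTR_ERR(data);   /* -EOPNOTSUPP without CONFIG_EFI */

        ret = iwl_trans_set_reduce_power(trans, data, len);
        kfree(data);                    /* getter hands ownership to the caller */
        return ret;
}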
index b35ffdf..bf6ee56 100644 (file)
@@ -426,6 +426,7 @@ struct iwl_cfg {
 #define IWL_CFG_RF_TYPE_HR1            0x10C
 #define IWL_CFG_RF_TYPE_GF             0x10D
 #define IWL_CFG_RF_TYPE_MR             0x110
+#define IWL_CFG_RF_TYPE_FM             0x112
 
 #define IWL_CFG_RF_ID_TH               0x1
 #define IWL_CFG_RF_ID_TH1              0x1
@@ -505,8 +506,11 @@ extern const char iwl_ax201_killer_1650s_name[];
 extern const char iwl_ax201_killer_1650i_name[];
 extern const char iwl_ax210_killer_1675w_name[];
 extern const char iwl_ax210_killer_1675x_name[];
+extern const char iwl9560_killer_1550i_160_name[];
+extern const char iwl9560_killer_1550s_160_name[];
 extern const char iwl_ax211_name[];
 extern const char iwl_ax221_name[];
+extern const char iwl_ax231_name[];
 extern const char iwl_ax411_name[];
 #if IS_ENABLED(CONFIG_IWLDVM)
 extern const struct iwl_cfg iwl5300_agn_cfg;
@@ -586,7 +590,6 @@ extern const struct iwl_cfg iwl_qu_b0_hr_b0;
 extern const struct iwl_cfg iwl_qu_c0_hr_b0;
 extern const struct iwl_cfg iwl_ax200_cfg_cc;
 extern const struct iwl_cfg iwl_ax201_cfg_qu_hr;
-extern const struct iwl_cfg iwl_ax201_cfg_qu_hr;
 extern const struct iwl_cfg iwl_ax201_cfg_qu_c0_hr_b0;
 extern const struct iwl_cfg iwl_ax201_cfg_quz_hr;
 extern const struct iwl_cfg iwl_ax1650i_cfg_quz_hr;
@@ -613,6 +616,7 @@ extern const struct iwl_cfg iwl_cfg_ma_a0_hr_b0;
 extern const struct iwl_cfg iwl_cfg_ma_a0_gf_a0;
 extern const struct iwl_cfg iwl_cfg_ma_a0_gf4_a0;
 extern const struct iwl_cfg iwl_cfg_ma_a0_mr_a0;
+extern const struct iwl_cfg iwl_cfg_ma_a0_fm_a0;
 extern const struct iwl_cfg iwl_cfg_snj_a0_mr_a0;
 extern const struct iwl_cfg iwl_cfg_so_a0_hr_a0;
 extern const struct iwl_cfg iwl_cfg_quz_a0_hr_b0;
index 2be605c..e1fec23 100644 (file)
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
 /*
- * Copyright (C) 2018, 2020 Intel Corporation
+ * Copyright (C) 2018, 2020-2021 Intel Corporation
  */
 #ifndef __iwl_context_info_file_gen3_h__
 #define __iwl_context_info_file_gen3_h__
@@ -127,6 +127,17 @@ struct iwl_prph_scratch_rbd_cfg {
        __le32 reserved;
 } __packed; /* PERIPH_SCRATCH_RBD_CFG_S */
 
+/*
+ * struct iwl_prph_scratch_uefi_cfg - prph scratch reduce power table
+ * @base_addr: reduce power table address
+ * @size: table size in dwords
+ */
+struct iwl_prph_scratch_uefi_cfg {
+       __le64 base_addr;
+       __le32 size;
+       __le32 reserved;
+} __packed; /* PERIPH_SCRATCH_UEFI_CFG_S */
+
 /*
  * struct iwl_prph_scratch_ctrl_cfg - prph scratch ctrl and config
  * @version: version information of context info and HW
@@ -141,6 +152,7 @@ struct iwl_prph_scratch_ctrl_cfg {
        struct iwl_prph_scratch_pnvm_cfg pnvm_cfg;
        struct iwl_prph_scratch_hwm_cfg hwm_cfg;
        struct iwl_prph_scratch_rbd_cfg rbd_cfg;
+       struct iwl_prph_scratch_uefi_cfg reduce_power_cfg;
 } __packed; /* PERIPH_SCRATCH_CTRL_CFG_S */
 
 /*
@@ -151,7 +163,7 @@ struct iwl_prph_scratch_ctrl_cfg {
  */
 struct iwl_prph_scratch {
        struct iwl_prph_scratch_ctrl_cfg ctrl_cfg;
-       __le32 reserved[16];
+       __le32 reserved[12];
        struct iwl_context_info_dram dram;
 } __packed; /* PERIPH_SCRATCH_S */
 
@@ -245,9 +257,11 @@ struct iwl_context_info_gen3 {
 
 int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
                                 const struct fw_img *fw);
-void iwl_pcie_ctxt_info_gen3_free(struct iwl_trans *trans);
+void iwl_pcie_ctxt_info_gen3_free(struct iwl_trans *trans, bool alive);
 
 int iwl_trans_pcie_ctx_info_gen3_set_pnvm(struct iwl_trans *trans,
                                          const void *data, u32 len);
+int iwl_trans_pcie_ctx_info_gen3_set_reduce_power(struct iwl_trans *trans,
+                                                 const void *data, u32 len);
 
 #endif /* __iwl_context_info_file_gen3_h__ */
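
[note] On the reserved[16] -> reserved[12] change above: struct iwl_prph_scratch_uefi_cfg occupies exactly the four dwords taken out of the pad, so struct iwl_prph_scratch keeps its size and the trailing dram section keeps its offset, as firmware expects. A hypothetical compile-time guard (not part of the patch, kernel context assumed) would pin that invariant down:

/* Hypothetical guard; not in the patch. */
static_assert(sizeof(struct iwl_prph_scratch_uefi_cfg) == 4 * sizeof(__le32),
              "reduce_power_cfg must cover exactly the 4 dwords removed from reserved[]");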
index db312ab..47e5a17 100644 (file)
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
 /*
- * Copyright (C) 2005-2014, 2018-2020 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2021 Intel Corporation
  * Copyright (C) 2013-2014 Intel Mobile Communications GmbH
  * Copyright (C) 2016 Intel Deutschland GmbH
  */
@@ -325,9 +325,6 @@ enum {
 #define CSR_HW_RF_ID_TYPE_GF           (0x0010D000)
 #define CSR_HW_RF_ID_TYPE_GF4          (0x0010E000)
 
-/* HW_RF CHIP ID  */
-#define CSR_HW_RF_ID_TYPE_CHIP_ID(_val) (((_val) >> 12) & 0xFFF)
-
 /* HW_RF CHIP STEP  */
 #define CSR_HW_RF_STEP(_val) (((_val) >> 8) & 0xF)
 
index 4cd8c39..0ddd255 100644 (file)
@@ -57,7 +57,7 @@ dbg_ver_table[IWL_DBG_TLV_TYPE_NUM] = {
        [IWL_DBG_TLV_TYPE_DEBUG_INFO]   = {.min_ver = 1, .max_ver = 1,},
        [IWL_DBG_TLV_TYPE_BUF_ALLOC]    = {.min_ver = 1, .max_ver = 1,},
        [IWL_DBG_TLV_TYPE_HCMD]         = {.min_ver = 1, .max_ver = 1,},
-       [IWL_DBG_TLV_TYPE_REGION]       = {.min_ver = 1, .max_ver = 1,},
+       [IWL_DBG_TLV_TYPE_REGION]       = {.min_ver = 1, .max_ver = 2,},
        [IWL_DBG_TLV_TYPE_TRIGGER]      = {.min_ver = 1, .max_ver = 1,},
 };
 
@@ -178,9 +178,20 @@ static int iwl_dbg_tlv_alloc_region(struct iwl_trans *trans,
        u32 type = le32_to_cpu(reg->type);
        u32 tlv_len = sizeof(*tlv) + le32_to_cpu(tlv->length);
 
+       /*
+        * The higher part of the ID in version 2 is irrelevant for
+        * us, so mask it out.
+        */
+       if (le32_to_cpu(reg->hdr.version) == 2)
+               id &= IWL_FW_INI_REGION_V2_MASK;
+
        if (le32_to_cpu(tlv->length) < sizeof(*reg))
                return -EINVAL;
 
+       /* for safe use of a string from FW, limit it to IWL_FW_INI_MAX_NAME */
+       IWL_DEBUG_FW(trans, "WRT: parsing region: %.*s\n",
+                    IWL_FW_INI_MAX_NAME, reg->name);
+
        if (id >= IWL_FW_INI_MAX_REGION_ID) {
                IWL_ERR(trans, "WRT: Invalid region id %u\n", id);
                return -EINVAL;
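
[note] With version 2 region TLVs the upper bits of the ID carry information the driver does not use, so only the low part is kept before the range check. A standalone sketch of that masking; the mask and bound values here are illustrative stand-ins for IWL_FW_INI_REGION_V2_MASK and IWL_FW_INI_MAX_REGION_ID:

#include <stdint.h>

#define REGION_V2_MASK  0xFFFFu  /* illustrative; low bits hold the region id */
#define MAX_REGION_ID   64u      /* illustrative bound */

static int region_id_valid(uint32_t id, uint32_t version)
{
        if (version == 2)
                id &= REGION_V2_MASK;   /* upper ID bits are irrelevant in v2 */
        return id < MAX_REGION_ID;
}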
index 884750b..977dce6 100644 (file)
@@ -1117,6 +1117,17 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
                                IWL_ERROR_EVENT_TABLE_LMAC1;
                        break;
                        }
+               case IWL_UCODE_TLV_TCM_DEBUG_ADDRS: {
+                       struct iwl_fw_tcm_error_addr *ptr = (void *)tlv_data;
+
+                       if (tlv_len != sizeof(*ptr))
+                               goto invalid_tlv_len;
+                       drv->trans->dbg.tcm_error_event_table =
+                               le32_to_cpu(ptr->addr) & ~FW_ADDR_CACHE_CONTROL;
+                       drv->trans->dbg.error_event_table_tlv_status |=
+                               IWL_ERROR_EVENT_TABLE_TCM;
+                       break;
+                       }
                case IWL_UCODE_TLV_TYPE_DEBUG_INFO:
                case IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION:
                case IWL_UCODE_TLV_TYPE_HCMD:
index fc75d04..850648e 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
 /*
- * Copyright (C) 2005-2014, 2018-2020 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2021 Intel Corporation
  * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
  * Copyright (C) 2016-2017 Intel Deutschland GmbH
  */
@@ -549,8 +549,7 @@ static const struct ieee80211_sband_iftype_data iwl_he_capa[] = {
                                .mac_cap_info[2] =
                                        IEEE80211_HE_MAC_CAP2_32BIT_BA_BITMAP,
                                .mac_cap_info[3] =
-                                       IEEE80211_HE_MAC_CAP3_OMI_CONTROL |
-                                       IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_2,
+                                       IEEE80211_HE_MAC_CAP3_OMI_CONTROL,
                                .mac_cap_info[4] =
                                        IEEE80211_HE_MAC_CAP4_AMSDU_IN_AMPDU |
                                        IEEE80211_HE_MAC_CAP4_MULTI_TID_AGG_TX_QOS_B39,
@@ -579,25 +578,20 @@ static const struct ieee80211_sband_iftype_data iwl_he_capa[] = {
                                        IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE |
                                        IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_8 |
                                        IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_8,
-                               .phy_cap_info[5] =
-                                       IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_2 |
-                                       IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_2,
                                .phy_cap_info[6] =
                                        IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMING_FB |
                                        IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMING_PARTIAL_BW_FB |
                                        IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT,
                                .phy_cap_info[7] =
                                        IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_SUPP |
-                                       IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI |
-                                       IEEE80211_HE_PHY_CAP7_MAX_NC_1,
+                                       IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI,
                                .phy_cap_info[8] =
                                        IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI |
                                        IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G |
                                        IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU |
                                        IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU |
-                                       IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_2x996,
+                                       IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_242,
                                .phy_cap_info[9] =
-                                       IEEE80211_HE_PHY_CAP9_NON_TRIGGERED_CQI_FEEDBACK |
                                        IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB |
                                        IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB |
                                        IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_RESERVED,
@@ -632,19 +626,11 @@ static const struct ieee80211_sband_iftype_data iwl_he_capa[] = {
                                .mac_cap_info[1] =
                                        IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US |
                                        IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8,
-                               .mac_cap_info[2] =
-                                       IEEE80211_HE_MAC_CAP2_BSR,
                                .mac_cap_info[3] =
-                                       IEEE80211_HE_MAC_CAP3_OMI_CONTROL |
-                                       IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_2,
-                               .mac_cap_info[4] =
-                                       IEEE80211_HE_MAC_CAP4_AMSDU_IN_AMPDU,
-                               .mac_cap_info[5] =
-                                       IEEE80211_HE_MAC_CAP5_UL_2x996_TONE_RU,
+                                       IEEE80211_HE_MAC_CAP3_OMI_CONTROL,
                                .phy_cap_info[0] =
                                        IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G |
-                                       IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G |
-                                       IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G,
+                                       IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G,
                                .phy_cap_info[1] =
                                        IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD,
                                .phy_cap_info[2] =
@@ -654,27 +640,14 @@ static const struct ieee80211_sband_iftype_data iwl_he_capa[] = {
                                        IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_1 |
                                        IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_NO_DCM |
                                        IEEE80211_HE_PHY_CAP3_DCM_MAX_RX_NSS_1,
-                               .phy_cap_info[4] =
-                                       IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE |
-                                       IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_8 |
-                                       IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_8,
-                               .phy_cap_info[5] =
-                                       IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_2 |
-                                       IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_2,
                                .phy_cap_info[6] =
                                        IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT,
                                .phy_cap_info[7] =
-                                       IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI |
-                                       IEEE80211_HE_PHY_CAP7_MAX_NC_1,
+                                       IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI,
                                .phy_cap_info[8] =
                                        IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI |
-                                       IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G |
-                                       IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU |
-                                       IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU |
-                                       IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_2x996,
+                                       IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_242,
                                .phy_cap_info[9] =
-                                       IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB |
-                                       IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB |
                                        IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_RESERVED,
                        },
                        /*
@@ -745,12 +718,72 @@ static void iwl_init_he_6ghz_capa(struct iwl_trans *trans,
                iftype_data[i].he_6ghz_capa.capa = cpu_to_le16(he_6ghz_capa);
 }
 
+static void
+iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans,
+                        struct ieee80211_supported_band *sband,
+                        struct ieee80211_sband_iftype_data *iftype_data,
+                        u8 tx_chains, u8 rx_chains,
+                        const struct iwl_fw *fw)
+{
+       bool is_ap = iftype_data->types_mask & BIT(NL80211_IFTYPE_AP);
+
+       /* Advertise an A-MPDU exponent extension based on
+        * operating band
+        */
+       if (sband->band != NL80211_BAND_2GHZ)
+               iftype_data->he_cap.he_cap_elem.mac_cap_info[3] |=
+                       IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_1;
+       else
+               iftype_data->he_cap.he_cap_elem.mac_cap_info[3] |=
+                       IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_3;
+
+       if (is_ap && iwlwifi_mod_params.nvm_file)
+               iftype_data->he_cap.he_cap_elem.phy_cap_info[0] |=
+                       IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G;
+
+       if ((tx_chains & rx_chains) == ANT_AB) {
+               iftype_data->he_cap.he_cap_elem.phy_cap_info[5] |=
+                       IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_2 |
+                       IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_2;
+               if (!is_ap)
+                       iftype_data->he_cap.he_cap_elem.phy_cap_info[7] |=
+                               IEEE80211_HE_PHY_CAP7_MAX_NC_2;
+       } else if (!is_ap) {
+               /* If not 2x2, we need to indicate 1x1 in the
+                * Midamble RX Max NSTS - but not for AP mode
+                */
+               iftype_data->he_cap.he_cap_elem.phy_cap_info[1] &=
+                       ~IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS;
+               iftype_data->he_cap.he_cap_elem.phy_cap_info[2] &=
+                       ~IEEE80211_HE_PHY_CAP2_MIDAMBLE_RX_TX_MAX_NSTS;
+               iftype_data->he_cap.he_cap_elem.phy_cap_info[7] |=
+                       IEEE80211_HE_PHY_CAP7_MAX_NC_1;
+       }
+
+       switch (CSR_HW_RFID_TYPE(trans->hw_rf_id)) {
+       case IWL_CFG_RF_TYPE_GF:
+       case IWL_CFG_RF_TYPE_MR:
+               iftype_data->he_cap.he_cap_elem.phy_cap_info[9] |=
+                       IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU;
+               if (!is_ap)
+                       iftype_data->he_cap.he_cap_elem.phy_cap_info[9] |=
+                               IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU;
+               break;
+       }
+
+       if (fw_has_capa(&fw->ucode_capa, IWL_UCODE_TLV_CAPA_BROADCAST_TWT))
+               iftype_data->he_cap.he_cap_elem.mac_cap_info[2] |=
+                       IEEE80211_HE_MAC_CAP2_BCAST_TWT;
+}
+
 static void iwl_init_he_hw_capab(struct iwl_trans *trans,
                                 struct iwl_nvm_data *data,
                                 struct ieee80211_supported_band *sband,
-                                u8 tx_chains, u8 rx_chains)
+                                u8 tx_chains, u8 rx_chains,
+                                const struct iwl_fw *fw)
 {
        struct ieee80211_sband_iftype_data *iftype_data;
+       int i;
 
        /* should only initialize once */
        if (WARN_ON(sband->iftype_data))
@@ -777,26 +810,18 @@ static void iwl_init_he_hw_capab(struct iwl_trans *trans,
        sband->iftype_data = iftype_data;
        sband->n_iftype_data = ARRAY_SIZE(iwl_he_capa);
 
-       /* If not 2x2, we need to indicate 1x1 in the Midamble RX Max NSTS */
-       if ((tx_chains & rx_chains) != ANT_AB) {
-               int i;
-
-               for (i = 0; i < sband->n_iftype_data; i++) {
-                       iftype_data[i].he_cap.he_cap_elem.phy_cap_info[1] &=
-                               ~IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS;
-                       iftype_data[i].he_cap.he_cap_elem.phy_cap_info[2] &=
-                               ~IEEE80211_HE_PHY_CAP2_MIDAMBLE_RX_TX_MAX_NSTS;
-                       iftype_data[i].he_cap.he_cap_elem.phy_cap_info[7] &=
-                               ~IEEE80211_HE_PHY_CAP7_MAX_NC_MASK;
-               }
-       }
+       for (i = 0; i < sband->n_iftype_data; i++)
+               iwl_nvm_fixup_sband_iftd(trans, sband, &iftype_data[i],
+                                        tx_chains, rx_chains, fw);
+
        iwl_init_he_6ghz_capa(trans, data, sband, tx_chains, rx_chains);
 }
 
 static void iwl_init_sbands(struct iwl_trans *trans,
                            struct iwl_nvm_data *data,
                            const void *nvm_ch_flags, u8 tx_chains,
-                           u8 rx_chains, u32 sbands_flags, bool v4)
+                           u8 rx_chains, u32 sbands_flags, bool v4,
+                           const struct iwl_fw *fw)
 {
        struct device *dev = trans->dev;
        const struct iwl_cfg *cfg = trans->cfg;
@@ -816,7 +841,8 @@ static void iwl_init_sbands(struct iwl_trans *trans,
                             tx_chains, rx_chains);
 
        if (data->sku_cap_11ax_enable && !iwlwifi_mod_params.disable_11ax)
-               iwl_init_he_hw_capab(trans, data, sband, tx_chains, rx_chains);
+               iwl_init_he_hw_capab(trans, data, sband, tx_chains, rx_chains,
+                                    fw);
 
        sband = &data->bands[NL80211_BAND_5GHZ];
        sband->band = NL80211_BAND_5GHZ;
@@ -831,7 +857,8 @@ static void iwl_init_sbands(struct iwl_trans *trans,
                                      tx_chains, rx_chains);
 
        if (data->sku_cap_11ax_enable && !iwlwifi_mod_params.disable_11ax)
-               iwl_init_he_hw_capab(trans, data, sband, tx_chains, rx_chains);
+               iwl_init_he_hw_capab(trans, data, sband, tx_chains, rx_chains,
+                                    fw);
 
        /* 6GHz band. */
        sband = &data->bands[NL80211_BAND_6GHZ];
@@ -843,7 +870,8 @@ static void iwl_init_sbands(struct iwl_trans *trans,
                                          NL80211_BAND_6GHZ);
 
        if (data->sku_cap_11ax_enable && !iwlwifi_mod_params.disable_11ax)
-               iwl_init_he_hw_capab(trans, data, sband, tx_chains, rx_chains);
+               iwl_init_he_hw_capab(trans, data, sband, tx_chains, rx_chains,
+                                    fw);
        else
                sband->n_channels = 0;
        if (n_channels != n_used)
@@ -1154,7 +1182,7 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
                sbands_flags |= IWL_NVM_SBANDS_FLAGS_NO_WIDE_IN_5GHZ;
 
        iwl_init_sbands(trans, data, ch_section, tx_chains, rx_chains,
-                       sbands_flags, false);
+                       sbands_flags, false, fw);
        data->calib_version = 255;
 
        return data;
@@ -1661,7 +1689,7 @@ struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans,
                        channel_profile,
                        nvm->valid_tx_ant & fw->valid_tx_ant,
                        nvm->valid_rx_ant & fw->valid_rx_ant,
-                       sbands_flags, v4);
+                       sbands_flags, v4, fw);
 
        iwl_free_resp(&hcmd);
        return nvm;
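
[note] The refactoring above replaces the old "clear the 2x2 bits when not ANT_AB" loop with iwl_nvm_fixup_sband_iftd(), which builds per-interface-type capabilities from band, chain mask, RF type and firmware capabilities. A reduced sketch of the chain-dependent branch (ANT_* values shown as assumptions matching the driver's bitmask convention):

#include <stdbool.h>
#include <stdint.h>

#define ANT_A  0x1
#define ANT_B  0x2
#define ANT_AB (ANT_A | ANT_B)

/* 2x2 sounding dimensions (and, for clients, NC=2) are only advertised
 * when both chains are usable for both TX and RX; otherwise non-AP
 * interfaces fall back to the 1x1 midamble/NC settings. */
static bool advertise_2x2(uint8_t tx_chains, uint8_t rx_chains)
{
        return (tx_chains & rx_chains) == ANT_AB;
}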
index 3ce77e4..9a9e714 100644 (file)
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
 /*
- * Copyright (C) 2005-2014, 2018-2020 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2021 Intel Corporation
  * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
  * Copyright (C) 2016 Intel Deutschland GmbH
  */
@@ -412,6 +412,8 @@ enum {
 #define UREG_DOORBELL_TO_ISR6_RESUME   BIT(19)
 #define UREG_DOORBELL_TO_ISR6_PNVM     BIT(20)
 
+#define CNVI_MBOX_C                    0xA3400C
+
 #define FSEQ_ERROR_CODE                        0xA340C8
 #define FSEQ_TOP_INIT_VERSION          0xA34038
 #define FSEQ_CNVIO_INIT_VERSION                0xA3403C
index bf569f8..0199d7a 100644 (file)
@@ -193,6 +193,7 @@ enum iwl_error_event_table_status {
        IWL_ERROR_EVENT_TABLE_LMAC1 = BIT(0),
        IWL_ERROR_EVENT_TABLE_LMAC2 = BIT(1),
        IWL_ERROR_EVENT_TABLE_UMAC = BIT(2),
+       IWL_ERROR_EVENT_TABLE_TCM = BIT(3),
 };
 
 /**
@@ -589,6 +590,8 @@ struct iwl_trans_ops {
        void (*debugfs_cleanup)(struct iwl_trans *trans);
        void (*sync_nmi)(struct iwl_trans *trans);
        int (*set_pnvm)(struct iwl_trans *trans, const void *data, u32 len);
+       int (*set_reduce_power)(struct iwl_trans *trans,
+                               const void *data, u32 len);
        void (*interrupts)(struct iwl_trans *trans, bool enable);
 };
 
@@ -706,6 +709,7 @@ struct iwl_self_init_dram {
  * @trigger_tlv: array of pointers to triggers TLVs for debug
  * @lmac_error_event_table: addrs of lmacs error tables
  * @umac_error_event_table: addr of umac error table
+ * @tcm_error_event_table: address of TCM error table
  * @error_event_table_tlv_status: bitmap that indicates what error table
 *     pointers were received via TLV. Uses enum &iwl_error_event_table_status
  * @internal_ini_cfg: internal debug cfg state. Uses &enum iwl_ini_cfg_state
@@ -732,6 +736,7 @@ struct iwl_trans_debug {
 
        u32 lmac_error_event_table[2];
        u32 umac_error_event_table;
+       u32 tcm_error_event_table;
        unsigned int error_event_table_tlv_status;
 
        enum iwl_ini_cfg_state internal_ini_cfg;
@@ -957,6 +962,7 @@ struct iwl_trans {
        bool pm_support;
        bool ltr_enabled;
        u8 pnvm_loaded:1;
+       u8 reduce_power_loaded:1;
 
        const struct iwl_hcmd_arr *command_groups;
        int command_groups_size;
@@ -1420,6 +1426,20 @@ static inline int iwl_trans_set_pnvm(struct iwl_trans *trans,
        return 0;
 }
 
+static inline int iwl_trans_set_reduce_power(struct iwl_trans *trans,
+                                            const void *data, u32 len)
+{
+       if (trans->ops->set_reduce_power) {
+               int ret = trans->ops->set_reduce_power(trans, data, len);
+
+               if (ret)
+                       return ret;
+       }
+
+       trans->reduce_power_loaded = true;
+       return 0;
+}
+
 static inline bool iwl_trans_dbg_ini_valid(struct iwl_trans *trans)
 {
        return trans->dbg.internal_ini_cfg != IWL_INI_CFG_STATE_NOT_LOADED ||
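
[note] iwl_trans_set_reduce_power() mirrors iwl_trans_set_pnvm() just above it: the transport op is optional, and the *_loaded flag is set even when no op is present so the load is not retried. Because both flags are single-bit fields (their address cannot be taken), the driver open-codes the pattern twice; a generic sketch with a plain byte flag would look like this (hypothetical helper, kernel types assumed):

static inline int iwl_set_optional_blob(struct iwl_trans *trans,
                                        int (*op)(struct iwl_trans *,
                                                  const void *, u32),
                                        const void *data, u32 len,
                                        u8 *loaded)
{
        if (op) {
                int ret = op(trans, data, len);

                if (ret)
                        return ret;
        }
        *loaded = 1;    /* remember success even without a transport op */
        return 0;
}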
index 2e28cf2..6a259d8 100644 (file)
@@ -104,7 +104,7 @@ static const u8 *iwl_mvm_find_max_pn(struct ieee80211_key_conf *key,
 struct wowlan_key_data {
        struct iwl_wowlan_rsc_tsc_params_cmd *rsc_tsc;
        struct iwl_wowlan_tkip_params_cmd *tkip;
-       struct iwl_wowlan_kek_kck_material_cmd_v3 *kek_kck_cmd;
+       struct iwl_wowlan_kek_kck_material_cmd_v4 *kek_kck_cmd;
        bool error, use_rsc_tsc, use_tkip, configure_keys;
        int wep_key_idx;
 };
@@ -393,14 +393,19 @@ static int iwl_mvm_send_patterns_v1(struct iwl_mvm *mvm,
 }
 
 static int iwl_mvm_send_patterns(struct iwl_mvm *mvm,
+                                struct ieee80211_vif *vif,
                                 struct cfg80211_wowlan *wowlan)
 {
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        struct iwl_wowlan_patterns_cmd *pattern_cmd;
        struct iwl_host_cmd cmd = {
                .id = WOWLAN_PATTERNS,
                .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
        };
        int i, err;
+       int ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
+                                       WOWLAN_PATTERNS,
+                                       IWL_FW_CMD_VER_UNKNOWN);
 
        if (!wowlan->n_patterns)
                return 0;
@@ -408,11 +413,13 @@ static int iwl_mvm_send_patterns(struct iwl_mvm *mvm,
        cmd.len[0] = sizeof(*pattern_cmd) +
                wowlan->n_patterns * sizeof(struct iwl_wowlan_pattern_v2);
 
-       pattern_cmd = kmalloc(cmd.len[0], GFP_KERNEL);
+       pattern_cmd = kzalloc(cmd.len[0], GFP_KERNEL);
        if (!pattern_cmd)
                return -ENOMEM;
 
-       pattern_cmd->n_patterns = cpu_to_le32(wowlan->n_patterns);
+       pattern_cmd->n_patterns = wowlan->n_patterns;
+       if (ver >= 3)
+               pattern_cmd->sta_id = mvmvif->ap_sta_id;
 
        for (i = 0; i < wowlan->n_patterns; i++) {
                int mask_len = DIV_ROUND_UP(wowlan->patterns[i].pattern_len, 8);
@@ -636,7 +643,6 @@ iwl_mvm_get_wowlan_config(struct iwl_mvm *mvm,
                          struct ieee80211_vif *vif, struct iwl_mvm_vif *mvmvif,
                          struct ieee80211_sta *ap_sta)
 {
-       int ret;
        struct iwl_mvm_sta *mvm_ap_sta = iwl_mvm_sta_from_mac80211(ap_sta);
 
        /* TODO: wowlan_config_cmd->wowlan_ba_teardown_tids */
@@ -646,12 +652,16 @@ iwl_mvm_get_wowlan_config(struct iwl_mvm *mvm,
        wowlan_config_cmd->flags = ENABLE_L3_FILTERING |
                ENABLE_NBNS_FILTERING | ENABLE_DHCP_FILTERING;
 
-       /* Query the last used seqno and set it */
-       ret = iwl_mvm_get_last_nonqos_seq(mvm, vif);
-       if (ret < 0)
-               return ret;
+       if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
+                                 WOWLAN_CONFIGURATION, 0) < 6) {
+               /* Query the last used seqno and set it */
+               int ret = iwl_mvm_get_last_nonqos_seq(mvm, vif);
 
-       wowlan_config_cmd->non_qos_seq = cpu_to_le16(ret);
+               if (ret < 0)
+                       return ret;
+
+               wowlan_config_cmd->non_qos_seq = cpu_to_le16(ret);
+       }
 
        iwl_mvm_set_wowlan_qos_seq(mvm_ap_sta, wowlan_config_cmd);
 
@@ -706,7 +716,8 @@ static int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
                                            struct ieee80211_vif *vif,
                                            u32 cmd_flags)
 {
-       struct iwl_wowlan_kek_kck_material_cmd_v3 kek_kck_cmd = {};
+       struct iwl_wowlan_kek_kck_material_cmd_v4 kek_kck_cmd = {};
+       struct iwl_wowlan_kek_kck_material_cmd_v4 *_kek_kck_cmd = &kek_kck_cmd;
        struct iwl_wowlan_tkip_params_cmd tkip_cmd = {};
        bool unified = fw_has_capa(&mvm->fw->ucode_capa,
                                   IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
@@ -715,7 +726,7 @@ static int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
                .use_rsc_tsc = false,
                .tkip = &tkip_cmd,
                .use_tkip = false,
-               .kek_kck_cmd = &kek_kck_cmd,
+               .kek_kck_cmd = _kek_kck_cmd,
        };
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        int ret;
@@ -809,13 +820,9 @@ static int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
                                                IWL_ALWAYS_LONG_GROUP,
                                                WOWLAN_KEK_KCK_MATERIAL,
                                                IWL_FW_CMD_VER_UNKNOWN);
-               if (WARN_ON(cmd_ver != 2 && cmd_ver != 3 &&
+               if (WARN_ON(cmd_ver != 2 && cmd_ver != 3 && cmd_ver != 4 &&
                            cmd_ver != IWL_FW_CMD_VER_UNKNOWN))
                        return -EINVAL;
-               if (cmd_ver == 3)
-                       cmd_size = sizeof(struct iwl_wowlan_kek_kck_material_cmd_v3);
-               else
-                       cmd_size = sizeof(struct iwl_wowlan_kek_kck_material_cmd_v2);
 
                memcpy(kek_kck_cmd.kck, mvmvif->rekey_data.kck,
                       mvmvif->rekey_data.kck_len);
@@ -825,6 +832,21 @@ static int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
                kek_kck_cmd.kek_len = cpu_to_le16(mvmvif->rekey_data.kek_len);
                kek_kck_cmd.replay_ctr = mvmvif->rekey_data.replay_ctr;
                kek_kck_cmd.akm = cpu_to_le32(mvmvif->rekey_data.akm);
+               kek_kck_cmd.sta_id = cpu_to_le32(mvmvif->ap_sta_id);
+
+               if (cmd_ver == 4) {
+                       cmd_size = sizeof(struct iwl_wowlan_kek_kck_material_cmd_v4);
+               } else {
+                       if (cmd_ver == 3)
+                               cmd_size =
+                                       sizeof(struct iwl_wowlan_kek_kck_material_cmd_v3);
+                       else
+                               cmd_size =
+                                       sizeof(struct iwl_wowlan_kek_kck_material_cmd_v2);
+                       /* skip the sta_id at the beginning */
+                       _kek_kck_cmd = (void *)
+                               ((u8 *)_kek_kck_cmd) + sizeof(kek_kck_cmd.sta_id);
+               }
 
                IWL_DEBUG_WOWLAN(mvm, "setting akm %d\n",
                                 mvmvif->rekey_data.akm);
@@ -832,7 +854,7 @@ static int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
                ret = iwl_mvm_send_cmd_pdu(mvm,
                                           WOWLAN_KEK_KCK_MATERIAL, cmd_flags,
                                           cmd_size,
-                                          &kek_kck_cmd);
+                                          _kek_kck_cmd);
                if (ret)
                        goto out;
        }
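
[note] The v4 KEK/KCK layout apparently prepends sta_id to the v3 layout, so for older firmware the driver sends the same bytes but starts the payload just past sta_id and shrinks the size. A standalone sketch of that "skip the new leading field" trick; the struct contents are illustrative and the v2/v3 size distinction is collapsed for brevity:

#include <stdint.h>
#include <stddef.h>

struct kek_kck_v4 {                  /* illustrative reduced layout */
        uint32_t sta_id;             /* new leading field in v4 */
        uint8_t  older_body[64];     /* stand-in for the v2/v3 fields */
};

/* Pick the pointer/size pair to send for a given command version. */
static const void *kek_kck_payload(const struct kek_kck_v4 *cmd, int ver,
                                   size_t *size)
{
        if (ver >= 4) {
                *size = sizeof(*cmd);
                return cmd;
        }
        /* v2/v3: same bytes, minus the leading sta_id */
        *size = sizeof(*cmd) - sizeof(cmd->sta_id);
        return (const uint8_t *)cmd + sizeof(cmd->sta_id);
}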
@@ -884,7 +906,7 @@ iwl_mvm_wowlan_config(struct iwl_mvm *mvm,
 
        if (fw_has_api(&mvm->fw->ucode_capa,
                       IWL_UCODE_TLV_API_WOWLAN_TCP_SYN_WAKE))
-               ret = iwl_mvm_send_patterns(mvm, wowlan);
+               ret = iwl_mvm_send_patterns(mvm, vif, wowlan);
        else
                ret = iwl_mvm_send_patterns_v1(mvm, wowlan);
        if (ret)
@@ -1534,9 +1556,12 @@ static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm,
        }
 
 out:
-       mvmvif->seqno_valid = true;
-       /* +0x10 because the set API expects next-to-use, not last-used */
-       mvmvif->seqno = le16_to_cpu(status->non_qos_seq_ctr) + 0x10;
+       if (iwl_fw_lookup_notif_ver(mvm->fw, LONG_GROUP,
+                                   WOWLAN_GET_STATUSES, 0) < 10) {
+               mvmvif->seqno_valid = true;
+               /* +0x10 because the set API expects next-to-use, not last-used */
+               mvmvif->seqno = le16_to_cpu(status->non_qos_seq_ctr) + 0x10;
+       }
 
        return true;
 }
@@ -1587,15 +1612,27 @@ iwl_mvm_parse_wowlan_status_common(v6)
 iwl_mvm_parse_wowlan_status_common(v7)
 iwl_mvm_parse_wowlan_status_common(v9)
 
-struct iwl_wowlan_status *iwl_mvm_send_wowlan_get_status(struct iwl_mvm *mvm)
+static struct iwl_wowlan_status *
+iwl_mvm_send_wowlan_get_status(struct iwl_mvm *mvm, u8 sta_id)
 {
        struct iwl_wowlan_status *status;
+       struct iwl_wowlan_get_status_cmd get_status_cmd = {
+               .sta_id = cpu_to_le32(sta_id),
+       };
        struct iwl_host_cmd cmd = {
                .id = WOWLAN_GET_STATUSES,
                .flags = CMD_WANT_SKB,
+               .data = { &get_status_cmd, },
+               .len = { sizeof(get_status_cmd), },
        };
        int ret, len;
        u8 notif_ver;
+       u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
+                                          WOWLAN_GET_STATUSES,
+                                          IWL_FW_CMD_VER_UNKNOWN);
+
+       if (cmd_ver == IWL_FW_CMD_VER_UNKNOWN)
+               cmd.len[0] = 0;
 
        lockdep_assert_held(&mvm->mutex);
 
@@ -1608,8 +1645,11 @@ struct iwl_wowlan_status *iwl_mvm_send_wowlan_get_status(struct iwl_mvm *mvm)
        len = iwl_rx_packet_payload_len(cmd.resp_pkt);
 
        /* default to 7 (when we have IWL_UCODE_TLV_API_WOWLAN_KEY_MATERIAL) */
-       notif_ver = iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP,
-                                           WOWLAN_GET_STATUSES, 7);
+       notif_ver = iwl_fw_lookup_notif_ver(mvm->fw, LONG_GROUP,
+                                           WOWLAN_GET_STATUSES, 0);
+       if (!notif_ver)
+               notif_ver = iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP,
+                                                   WOWLAN_GET_STATUSES, 7);
 
        if (!fw_has_api(&mvm->fw->ucode_capa,
                        IWL_UCODE_TLV_API_WOWLAN_KEY_MATERIAL)) {
@@ -1654,7 +1694,7 @@ struct iwl_wowlan_status *iwl_mvm_send_wowlan_get_status(struct iwl_mvm *mvm)
 
                status->gtk[0] = v7->gtk[0];
                status->igtk[0] = v7->igtk[0];
-       } else if (notif_ver == 9) {
+       } else if (notif_ver == 9 || notif_ver == 10) {
                struct iwl_wowlan_status_v9 *v9 = (void *)cmd.resp_pkt->data;
 
                status = iwl_mvm_parse_wowlan_status_common_v9(mvm,
@@ -1680,29 +1720,37 @@ out_free_resp:
 }
 
 static struct iwl_wowlan_status *
-iwl_mvm_get_wakeup_status(struct iwl_mvm *mvm)
+iwl_mvm_get_wakeup_status(struct iwl_mvm *mvm, u8 sta_id)
 {
-       int ret;
-
-       /* only for tracing for now */
-       ret = iwl_mvm_send_cmd_pdu(mvm, OFFLOADS_QUERY_CMD, 0, 0, NULL);
-       if (ret)
-               IWL_ERR(mvm, "failed to query offload statistics (%d)\n", ret);
+       u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
+                                          OFFLOADS_QUERY_CMD,
+                                          IWL_FW_CMD_VER_UNKNOWN);
+       __le32 station_id = cpu_to_le32(sta_id);
+       u32 cmd_size = cmd_ver != IWL_FW_CMD_VER_UNKNOWN ? sizeof(station_id) : 0;
+
+       if (!mvm->net_detect) {
+               /* only for tracing for now */
+               int ret = iwl_mvm_send_cmd_pdu(mvm, OFFLOADS_QUERY_CMD, 0,
+                                              cmd_size, &station_id);
+               if (ret)
+                       IWL_ERR(mvm, "failed to query offload statistics (%d)\n", ret);
+       }
 
-       return iwl_mvm_send_wowlan_get_status(mvm);
+       return iwl_mvm_send_wowlan_get_status(mvm, sta_id);
 }
 
 /* releases the MVM mutex */
 static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
                                         struct ieee80211_vif *vif)
 {
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        struct iwl_wowlan_status_data status;
        struct iwl_wowlan_status *fw_status;
        int i;
        bool keep;
        struct iwl_mvm_sta *mvm_ap_sta;
 
-       fw_status = iwl_mvm_get_wakeup_status(mvm);
+       fw_status = iwl_mvm_get_wakeup_status(mvm, mvmvif->ap_sta_id);
        if (IS_ERR_OR_NULL(fw_status))
                goto out_unlock;
 
@@ -1880,7 +1928,7 @@ static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm,
        u32 reasons = 0;
        int i, n_matches, ret;
 
-       fw_status = iwl_mvm_get_wakeup_status(mvm);
+       fw_status = iwl_mvm_get_wakeup_status(mvm, IWL_MVM_INVALID_STA);
        if (!IS_ERR_OR_NULL(fw_status)) {
                reasons = le32_to_cpu(fw_status->wakeup_reasons);
                kfree(fw_status);
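
[note] Two behavioral points in this status path: the notification version is now looked up in LONG_GROUP first, falling back to the LEGACY_GROUP lookup (default 7) for older images, and net-detect queries pass IWL_MVM_INVALID_STA since no AP station exists in that flow. A sketch of the fallback lookup (kernel context assumed; the helper name is illustrative):

static u8 wowlan_status_notif_ver(const struct iwl_fw *fw)
{
        u8 ver = iwl_fw_lookup_notif_ver(fw, LONG_GROUP,
                                         WOWLAN_GET_STATUSES, 0);

        if (!ver)       /* older firmware only advertises the legacy group */
                ver = iwl_fw_lookup_notif_ver(fw, LEGACY_GROUP,
                                              WOWLAN_GET_STATUSES, 7);
        return ver;
}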
index 38d0bfb..7d9faef 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
 /*
- * Copyright (C) 2012-2014, 2018-2020 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
  * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
  * Copyright (C) 2016-2017 Intel Deutschland GmbH
  */
@@ -460,7 +460,7 @@ static ssize_t iwl_dbgfs_os_device_timediff_read(struct file *file,
        int pos = 0;
 
        mutex_lock(&mvm->mutex);
-       iwl_mvm_get_sync_time(mvm, &curr_gp2, &curr_os);
+       iwl_mvm_get_sync_time(mvm, CLOCK_BOOTTIME, &curr_gp2, &curr_os, NULL);
        mutex_unlock(&mvm->mutex);
 
        do_div(curr_os, NSEC_PER_USEC);
index 63d6501..95f883a 100644 (file)
@@ -1023,7 +1023,9 @@ static ssize_t iwl_dbgfs_fw_restart_write(struct iwl_mvm *mvm, char *buf,
                mvm->fw_restart++;
 
        /* take the return value to make compiler happy - it will fail anyway */
-       ret = iwl_mvm_send_cmd_pdu(mvm, REPLY_ERROR, 0, 0, NULL);
+       ret = iwl_mvm_send_cmd_pdu(mvm,
+                                  WIDE_ID(LONG_GROUP, REPLY_ERROR),
+                                  0, 0, NULL);
 
        mutex_unlock(&mvm->mutex);
 
index a456b8a..59cef0d 100644 (file)
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
 /*
  * Copyright (C) 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2020 Intel Corporation
+ * Copyright (C) 2018-2021 Intel Corporation
  */
 #include <linux/etherdevice.h>
 #include <linux/math64.h>
@@ -430,6 +430,10 @@ iwl_mvm_ftm_put_target_common(struct iwl_mvm *mvm,
                FTM_PUT_FLAG(TB);
        else if (peer->ftm.non_trigger_based)
                FTM_PUT_FLAG(NON_TB);
+
+       if ((peer->ftm.trigger_based || peer->ftm.non_trigger_based) &&
+           peer->ftm.lmr_feedback)
+               FTM_PUT_FLAG(LMR_FEEDBACK);
 }
 
 static int
@@ -879,7 +883,8 @@ static u64 iwl_mvm_ftm_get_host_time(struct iwl_mvm *mvm, __le32 fw_gp2_ts)
        u32 curr_gp2, diff;
        u64 now_from_boot_ns;
 
-       iwl_mvm_get_sync_time(mvm, &curr_gp2, &now_from_boot_ns);
+       iwl_mvm_get_sync_time(mvm, CLOCK_BOOTTIME, &curr_gp2,
+                             &now_from_boot_ns, NULL);
 
        if (curr_gp2 >= gp2_ts)
                diff = curr_gp2 - gp2_ts;
index 8aa5f1a..38fd588 100644 (file)
@@ -1139,19 +1139,34 @@ static u8 iwl_mvm_eval_dsm_rfi(struct iwl_mvm *mvm)
 
 static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm)
 {
-       int cmd_ret;
-       struct iwl_lari_config_change_cmd_v3 cmd = {};
+       int ret;
+       u32 value;
+       struct iwl_lari_config_change_cmd_v4 cmd = {};
 
        cmd.config_bitmap = iwl_acpi_get_lari_config_bitmap(&mvm->fwrt);
 
+       ret = iwl_acpi_get_dsm_u32((&mvm->fwrt)->dev, 0, DSM_FUNC_11AX_ENABLEMENT,
+                                  &iwl_guid, &value);
+       if (!ret)
+               cmd.oem_11ax_allow_bitmap = cpu_to_le32(value);
        /* apply more config masks here */
 
-       if (cmd.config_bitmap) {
+       ret = iwl_acpi_get_dsm_u32((&mvm->fwrt)->dev, 0,
+                                  DSM_FUNC_ENABLE_UNII4_CHAN,
+                                  &iwl_guid, &value);
+       if (!ret)
+               cmd.oem_unii4_allow_bitmap = cpu_to_le32(value);
+
+       if (cmd.config_bitmap ||
+           cmd.oem_11ax_allow_bitmap ||
+           cmd.oem_unii4_allow_bitmap) {
                size_t cmd_size;
                u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
                                                   REGULATORY_AND_NVM_GROUP,
                                                   LARI_CONFIG_CHANGE, 1);
-               if (cmd_ver == 3)
+               if (cmd_ver == 4)
+                       cmd_size = sizeof(struct iwl_lari_config_change_cmd_v4);
+               else if (cmd_ver == 3)
                        cmd_size = sizeof(struct iwl_lari_config_change_cmd_v3);
                else if (cmd_ver == 2)
                        cmd_size = sizeof(struct iwl_lari_config_change_cmd_v2);
@@ -1159,16 +1174,21 @@ static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm)
                        cmd_size = sizeof(struct iwl_lari_config_change_cmd_v1);
 
                IWL_DEBUG_RADIO(mvm,
-                               "sending LARI_CONFIG_CHANGE, config_bitmap=0x%x\n",
-                               le32_to_cpu(cmd.config_bitmap));
-               cmd_ret = iwl_mvm_send_cmd_pdu(mvm,
-                                              WIDE_ID(REGULATORY_AND_NVM_GROUP,
-                                                      LARI_CONFIG_CHANGE),
-                                              0, cmd_size, &cmd);
-               if (cmd_ret < 0)
+                               "sending LARI_CONFIG_CHANGE, config_bitmap=0x%x, oem_11ax_allow_bitmap=0x%x\n",
+                               le32_to_cpu(cmd.config_bitmap),
+                               le32_to_cpu(cmd.oem_11ax_allow_bitmap));
+               IWL_DEBUG_RADIO(mvm,
+                               "sending LARI_CONFIG_CHANGE, oem_unii4_allow_bitmap=0x%x, cmd_ver=%d\n",
+                               le32_to_cpu(cmd.oem_unii4_allow_bitmap),
+                               cmd_ver);
+               ret = iwl_mvm_send_cmd_pdu(mvm,
+                                          WIDE_ID(REGULATORY_AND_NVM_GROUP,
+                                                  LARI_CONFIG_CHANGE),
+                                          0, cmd_size, &cmd);
+               if (ret < 0)
                        IWL_DEBUG_RADIO(mvm,
                                        "Failed to send LARI_CONFIG_CHANGE (%d)\n",
-                                       cmd_ret);
+                                       ret);
        }
 }
 #else /* CONFIG_ACPI */
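
[note] The else-if chain above maps the firmware's advertised LARI_CONFIG_CHANGE version to the exact struct size it understands, so older firmware receives only the fields it knows. Collected as a helper for clarity (a sketch, not the driver's code; the struct names are the ones used above):

static size_t iwl_lari_cmd_size(u8 cmd_ver)
{
        switch (cmd_ver) {
        case 4:
                return sizeof(struct iwl_lari_config_change_cmd_v4);
        case 3:
                return sizeof(struct iwl_lari_config_change_cmd_v3);
        case 2:
                return sizeof(struct iwl_lari_config_change_cmd_v2);
        default:
                return sizeof(struct iwl_lari_config_change_cmd_v1);
        }
}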
index cc78f30..70ebecb 100644 (file)
@@ -3800,6 +3800,7 @@ static int iwl_mvm_roc(struct ieee80211_hw *hw,
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        struct cfg80211_chan_def chandef;
        struct iwl_mvm_phy_ctxt *phy_ctxt;
+       bool band_change_removal;
        int ret, i;
 
        IWL_DEBUG_MAC80211(mvm, "enter (%d, %d, %d)\n", channel->hw_value,
@@ -3880,19 +3881,30 @@ static int iwl_mvm_roc(struct ieee80211_hw *hw,
        cfg80211_chandef_create(&chandef, channel, NL80211_CHAN_NO_HT);
 
        /*
-        * Change the PHY context configuration as it is currently referenced
-        * only by the P2P Device MAC
+        * Check if the remain-on-channel is on a different band and that
+        * requires context removal, see iwl_mvm_phy_ctxt_changed(). If
+        * so, we'll need to release and then re-configure here, since we
+        * must not remove a PHY context that's part of a binding.
         */
-       if (mvmvif->phy_ctxt->ref == 1) {
+       band_change_removal =
+               fw_has_capa(&mvm->fw->ucode_capa,
+                           IWL_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT) &&
+               mvmvif->phy_ctxt->channel->band != chandef.chan->band;
+
+       if (mvmvif->phy_ctxt->ref == 1 && !band_change_removal) {
+               /*
+                * Change the PHY context configuration as it is currently
+                * referenced only by the P2P Device MAC (and we can modify it)
+                */
                ret = iwl_mvm_phy_ctxt_changed(mvm, mvmvif->phy_ctxt,
                                               &chandef, 1, 1);
                if (ret)
                        goto out_unlock;
        } else {
                /*
-                * The PHY context is shared with other MACs. Need to remove the
-                * P2P Device from the binding, allocate an new PHY context and
-                * create a new binding
+                * The PHY context is shared with other MACs (or we're trying to
+                * switch bands), so remove the P2P Device from the binding,
+                * allocate a new PHY context and create a new binding.
                 */
                phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm);
                if (!phy_ctxt) {
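
[note] The key addition in this remain-on-channel path is band_change_removal: on firmware with CDB binding support, a bound PHY context must not be moved to another band in place, so the driver takes the release-and-reallocate branch even when it holds the only reference. The decision, reduced to a standalone sketch:

#include <stdbool.h>

static bool roc_can_modify_in_place(unsigned int phy_ctxt_refs, bool cdb_fw,
                                    int cur_band, int target_band)
{
        bool band_change_removal = cdb_fw && cur_band != target_band;

        return phy_ctxt_refs == 1 && !band_change_removal;
}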
@@ -4211,7 +4223,6 @@ static void __iwl_mvm_unassign_vif_chanctx(struct iwl_mvm *mvm,
        struct ieee80211_vif *disabled_vif = NULL;
 
        lockdep_assert_held(&mvm->mutex);
-
        iwl_mvm_remove_time_event(mvm, mvmvif, &mvmvif->time_event_data);
 
        switch (vif->type) {
index 4d9d4d6..b50942f 100644 (file)
@@ -16,6 +16,8 @@
 #include <linux/thermal.h>
 #endif
 
+#include <linux/ktime.h>
+
 #include "iwl-op-mode.h"
 #include "iwl-trans.h"
 #include "fw/notif-wait.h"
@@ -195,6 +197,7 @@ enum iwl_mvm_smps_type_request {
        IWL_MVM_SMPS_REQ_BT_COEX,
        IWL_MVM_SMPS_REQ_TT,
        IWL_MVM_SMPS_REQ_PROT,
+       IWL_MVM_SMPS_REQ_FW,
        NUM_IWL_MVM_SMPS_REQ,
 };
 
@@ -991,6 +994,8 @@ struct iwl_mvm {
         */
        bool temperature_test;  /* Debug test temperature is enabled */
 
+       bool fw_static_smps_request;
+
        unsigned long bt_coex_last_tcm_ts;
        struct iwl_mvm_tcm tcm;
 
@@ -1447,10 +1452,16 @@ void iwl_mvm_hwrate_to_tx_rate(u32 rate_n_flags,
                               struct ieee80211_tx_rate *r);
 u8 iwl_mvm_mac80211_idx_to_hwrate(int rate_idx);
 u8 iwl_mvm_mac80211_ac_to_ucode_ac(enum ieee80211_ac_numbers ac);
-void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm);
+
+static inline void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
+{
+       iwl_fwrt_dump_error_logs(&mvm->fwrt);
+}
+
 u8 first_antenna(u8 mask);
 u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx);
-void iwl_mvm_get_sync_time(struct iwl_mvm *mvm, u32 *gp2, u64 *boottime);
+void iwl_mvm_get_sync_time(struct iwl_mvm *mvm, int clock_type, u32 *gp2,
+                          u64 *boottime, ktime_t *realtime);
 u32 iwl_mvm_get_systime(struct iwl_mvm *mvm);
 
 /* Tx / Host Commands */
@@ -1769,7 +1780,6 @@ void iwl_mvm_ipv6_addr_change(struct ieee80211_hw *hw,
 void iwl_mvm_set_default_unicast_key(struct ieee80211_hw *hw,
                                     struct ieee80211_vif *vif, int idx);
 extern const struct file_operations iwl_dbgfs_d3_test_ops;
-struct iwl_wowlan_status *iwl_mvm_send_wowlan_get_status(struct iwl_mvm *mvm);
 #ifdef CONFIG_PM
 void iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm,
                                 struct ieee80211_vif *vif);
@@ -1827,7 +1837,9 @@ int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
 void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                                enum iwl_mvm_smps_type_request req_type,
                                enum ieee80211_smps_mode smps_request);
-bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm);
+bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm,
+                                 struct iwl_mvm_phy_ctxt *ctxt);
+void iwl_mvm_apply_fw_smps_request(struct ieee80211_vif *vif);
 
 /* Low latency */
 int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
index 1cc90e6..4188051 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
 /*
- * Copyright (C) 2012-2014 Intel Corporation
+ * Copyright (C) 2012-2014, 2021 Intel Corporation
  * Copyright (C) 2013-2014 Intel Mobile Communications GmbH
  * Copyright (C) 2015 Intel Deutschland GmbH
  */
@@ -36,7 +36,7 @@ int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
                struct iwl_proto_offload_cmd_v1 v1;
                struct iwl_proto_offload_cmd_v2 v2;
                struct iwl_proto_offload_cmd_v3_small v3s;
-               struct iwl_proto_offload_cmd_v3_large v3l;
+               struct iwl_proto_offload_cmd_v4 v4;
        } cmd = {};
        struct iwl_host_cmd hcmd = {
                .id = PROT_OFFLOAD_CONFIG_CMD,
@@ -47,6 +47,9 @@ int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
        struct iwl_proto_offload_cmd_common *common;
        u32 enabled = 0, size;
        u32 capa_flags = mvm->fw->ucode_capa.flags;
+       int ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
+                                       PROT_OFFLOAD_CONFIG_CMD, 0);
+
 #if IS_ENABLED(CONFIG_IPV6)
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        int i;
@@ -72,9 +75,9 @@ int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
                        addrs = cmd.v3s.targ_addrs;
                        n_addrs = IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3S;
                } else {
-                       nsc = cmd.v3l.ns_config;
+                       nsc = cmd.v4.ns_config;
                        n_nsc = IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3L;
-                       addrs = cmd.v3l.targ_addrs;
+                       addrs = cmd.v4.targ_addrs;
                        n_addrs = IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3L;
                }
 
@@ -116,7 +119,7 @@ int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
                        cmd.v3s.num_valid_ipv6_addrs =
                                cpu_to_le32(i - num_skipped);
                else
-                       cmd.v3l.num_valid_ipv6_addrs =
+                       cmd.v4.num_valid_ipv6_addrs =
                                cpu_to_le32(i - num_skipped);
        } else if (capa_flags & IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS) {
                bool found = false;
@@ -171,8 +174,17 @@ int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
                common = &cmd.v3s.common;
                size = sizeof(cmd.v3s);
        } else if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE) {
-               common = &cmd.v3l.common;
-               size = sizeof(cmd.v3l);
+               common = &cmd.v4.common;
+               size = sizeof(cmd.v4);
+               if (ver < 4) {
+                       /*
+                        * This basically uses iwl_proto_offload_cmd_v3_large
+                        * which doesn't have the sta_id parameter before the
+                        * common part.
+                        */
+                       size -= sizeof(cmd.v4.sta_id);
+                       hcmd.data[0] = common;
+               }
        } else if (capa_flags & IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS) {
                common = &cmd.v2.common;
                size = sizeof(cmd.v2);
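
[note] iwl_proto_offload_cmd_v4 plays the same trick as the KEK/KCK v4 command: sta_id is prepended to the old v3_large layout, and for ver < 4 the driver shrinks the size by sizeof(sta_id) and points the host command at common instead of the struct start. That only works if common immediately follows sta_id; a runnable sketch of that invariant (layout illustrative, not the real struct):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct proto_offload_v4 {               /* illustrative reduced layout */
        uint32_t sta_id;                /* new in v4 */
        struct { uint32_t enabled; } common;
        /* ... ns_config[], targ_addrs[] ... */
};

int main(void)
{
        /* sending from &cmd.common with size - sizeof(sta_id) requires: */
        assert(offsetof(struct proto_offload_v4, common) ==
               sizeof(uint32_t));
        return 0;
}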
index ebed82c..20e8d34 100644 (file)
@@ -210,6 +210,39 @@ static void iwl_mvm_rx_monitor_notif(struct iwl_mvm *mvm,
        ieee80211_disconnect(vif, true);
 }
 
+void iwl_mvm_apply_fw_smps_request(struct ieee80211_vif *vif)
+{
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       struct iwl_mvm *mvm = mvmvif->mvm;
+
+       iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_FW,
+                           mvm->fw_static_smps_request ?
+                               IEEE80211_SMPS_STATIC :
+                               IEEE80211_SMPS_AUTOMATIC);
+}
+
+static void iwl_mvm_intf_dual_chain_req(void *data, u8 *mac,
+                                       struct ieee80211_vif *vif)
+{
+       iwl_mvm_apply_fw_smps_request(vif);
+}
+
+static void iwl_mvm_rx_thermal_dual_chain_req(struct iwl_mvm *mvm,
+                                             struct iwl_rx_cmd_buffer *rxb)
+{
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+       struct iwl_thermal_dual_chain_request *req = (void *)pkt->data;
+
+       /*
+        * We could pass it to the iterator data, but also need to remember
+        * it for new interfaces that are added while in this state.
+        */
+       mvm->fw_static_smps_request =
+               req->event == cpu_to_le32(THERMAL_DUAL_CHAIN_REQ_DISABLE);
+       ieee80211_iterate_interfaces(mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+                                    iwl_mvm_intf_dual_chain_req, NULL);
+}
+
 /**
  * enum iwl_rx_handler_context context for Rx handler
  * @RX_HANDLER_SYNC : this means that it will be called in the Rx path
@@ -358,6 +391,11 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
        RX_HANDLER_GRP(DATA_PATH_GROUP, MONITOR_NOTIF,
                       iwl_mvm_rx_monitor_notif, RX_HANDLER_ASYNC_LOCKED,
                       struct iwl_datapath_monitor_notif),
+
+       RX_HANDLER_GRP(DATA_PATH_GROUP, THERMAL_DUAL_CHAIN_REQUEST,
+                      iwl_mvm_rx_thermal_dual_chain_req,
+                      RX_HANDLER_ASYNC_LOCKED,
+                      struct iwl_thermal_dual_chain_request),
 };
 #undef RX_HANDLER
 #undef RX_HANDLER_GRP
@@ -445,7 +483,6 @@ static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = {
        HCMD_NAME(D3_CONFIG_CMD),
        HCMD_NAME(PROT_OFFLOAD_CONFIG_CMD),
        HCMD_NAME(OFFLOADS_QUERY_CMD),
-       HCMD_NAME(REMOTE_WAKE_CONFIG_CMD),
        HCMD_NAME(MATCH_FOUND_NOTIFICATION),
        HCMD_NAME(DTS_MEASUREMENT_NOTIFICATION),
        HCMD_NAME(WOWLAN_PATTERNS),
@@ -503,6 +540,7 @@ static const struct iwl_hcmd_names iwl_mvm_data_path_names[] = {
        HCMD_NAME(TLC_MNG_CONFIG_CMD),
        HCMD_NAME(CHEST_COLLECTOR_FILTER_CONFIG_CMD),
        HCMD_NAME(MONITOR_NOTIF),
+       HCMD_NAME(THERMAL_DUAL_CHAIN_REQUEST),
        HCMD_NAME(STA_PM_NOTIF),
        HCMD_NAME(MU_GROUP_MGMT_NOTIF),
        HCMD_NAME(RX_QUEUES_NOTIFICATION),
index 0fd51f6..035336a 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
 /*
- * Copyright (C) 2012-2014, 2018-2020 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
  * Copyright (C) 2013-2014 Intel Mobile Communications GmbH
  * Copyright (C) 2017 Intel Deutschland GmbH
  */
@@ -76,6 +76,7 @@ static void iwl_mvm_phy_ctxt_cmd_hdr(struct iwl_mvm_phy_ctxt *ctxt,
 }
 
 static void iwl_mvm_phy_ctxt_set_rxchain(struct iwl_mvm *mvm,
+                                        struct iwl_mvm_phy_ctxt *ctxt,
                                         __le32 *rxchain_info,
                                         u8 chains_static,
                                         u8 chains_dynamic)
@@ -93,11 +94,22 @@ static void iwl_mvm_phy_ctxt_set_rxchain(struct iwl_mvm *mvm,
         * between the two antennas is sufficiently different to impact
         * performance.
         */
-       if (active_cnt == 1 && iwl_mvm_rx_diversity_allowed(mvm)) {
+       if (active_cnt == 1 && iwl_mvm_rx_diversity_allowed(mvm, ctxt)) {
                idle_cnt = 2;
                active_cnt = 2;
        }
 
+       /*
+        * If the firmware requested it, then we know that it supports
+        * getting zero for the values to indicate "use one, but pick
+        * which one yourself", which means it can dynamically pick one
+        * that e.g. has better RSSI.
+        */
+       if (mvm->fw_static_smps_request && active_cnt == 1 && idle_cnt == 1) {
+               idle_cnt = 0;
+               active_cnt = 0;
+       }
+
        *rxchain_info = cpu_to_le32(iwl_mvm_get_valid_rx_ant(mvm) <<
                                        PHY_RX_CHAIN_VALID_POS);
        *rxchain_info |= cpu_to_le32(idle_cnt << PHY_RX_CHAIN_CNT_POS);
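
The zero-count convention added above can be shown in isolation: when the firmware has asked for single-chain operation and both counts would otherwise be 1, writing 0/0 tells it to pick the better antenna itself. A sketch with illustrative bit positions (the real PHY_RX_CHAIN_* values live in the firmware API headers, so the constants below are assumptions):

#include <stdint.h>

#define RX_CHAIN_VALID_POS   1  /* illustrative positions only */
#define RX_CHAIN_CNT_POS     10
#define RX_CHAIN_MIMO_POS    12

static uint32_t build_rxchain_info(uint32_t valid_ant, uint32_t idle_cnt,
                                   uint32_t active_cnt, int fw_wants_static)
{
        /* 0/0 means "use one chain, but the firmware picks which one" */
        if (fw_wants_static && active_cnt == 1 && idle_cnt == 1)
                idle_cnt = active_cnt = 0;

        return (valid_ant << RX_CHAIN_VALID_POS) |
               (idle_cnt << RX_CHAIN_CNT_POS) |
               (active_cnt << RX_CHAIN_MIMO_POS);
}
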
@@ -113,6 +125,7 @@ static void iwl_mvm_phy_ctxt_set_rxchain(struct iwl_mvm *mvm,
  * Add the phy configuration to the PHY context command
  */
 static void iwl_mvm_phy_ctxt_cmd_data_v1(struct iwl_mvm *mvm,
+                                        struct iwl_mvm_phy_ctxt *ctxt,
                                         struct iwl_phy_context_cmd_v1 *cmd,
                                         struct cfg80211_chan_def *chandef,
                                         u8 chains_static, u8 chains_dynamic)
@@ -123,7 +136,7 @@ static void iwl_mvm_phy_ctxt_cmd_data_v1(struct iwl_mvm *mvm,
        /* Set the channel info data */
        iwl_mvm_set_chan_info_chandef(mvm, &cmd->ci, chandef);
 
-       iwl_mvm_phy_ctxt_set_rxchain(mvm, &tail->rxchain_info,
+       iwl_mvm_phy_ctxt_set_rxchain(mvm, ctxt, &tail->rxchain_info,
                                     chains_static, chains_dynamic);
 
        tail->txchain_info = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm));
@@ -133,6 +146,7 @@ static void iwl_mvm_phy_ctxt_cmd_data_v1(struct iwl_mvm *mvm,
  * Add the phy configuration to the PHY context command
  */
 static void iwl_mvm_phy_ctxt_cmd_data(struct iwl_mvm *mvm,
+                                     struct iwl_mvm_phy_ctxt *ctxt,
                                      struct iwl_phy_context_cmd *cmd,
                                      struct cfg80211_chan_def *chandef,
                                      u8 chains_static, u8 chains_dynamic)
@@ -143,7 +157,7 @@ static void iwl_mvm_phy_ctxt_cmd_data(struct iwl_mvm *mvm,
        /* Set the channel info data */
        iwl_mvm_set_chan_info_chandef(mvm, &cmd->ci, chandef);
 
-       iwl_mvm_phy_ctxt_set_rxchain(mvm, &cmd->rxchain_info,
+       iwl_mvm_phy_ctxt_set_rxchain(mvm, ctxt, &cmd->rxchain_info,
                                     chains_static, chains_dynamic);
 }
 
@@ -170,7 +184,7 @@ static int iwl_mvm_phy_ctxt_apply(struct iwl_mvm *mvm,
                iwl_mvm_phy_ctxt_cmd_hdr(ctxt, &cmd, action);
 
                /* Set the command data */
-               iwl_mvm_phy_ctxt_cmd_data(mvm, &cmd, chandef,
+               iwl_mvm_phy_ctxt_cmd_data(mvm, ctxt, &cmd, chandef,
                                          chains_static,
                                          chains_dynamic);
 
@@ -186,7 +200,7 @@ static int iwl_mvm_phy_ctxt_apply(struct iwl_mvm *mvm,
                                         action);
 
                /* Set the command data */
-               iwl_mvm_phy_ctxt_cmd_data_v1(mvm, &cmd, chandef,
+               iwl_mvm_phy_ctxt_cmd_data_v1(mvm, ctxt, &cmd, chandef,
                                             chains_static,
                                             chains_dynamic);
                ret = iwl_mvm_send_cmd_pdu(mvm, PHY_CONTEXT_CMD,
index 8e26422..c0babb8 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
 /*
- * Copyright (C) 2012-2014, 2018-2020 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
  * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
  * Copyright (C) 2015-2017 Intel Deutschland GmbH
  */
@@ -2001,8 +2001,10 @@ void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi,
        struct sk_buff *skb;
        u8 channel, energy_a, energy_b;
        struct iwl_mvm_rx_phy_data phy_data = {
+               .info_type = le32_get_bits(desc->phy_info[1],
+                                          IWL_RX_PHY_DATA1_INFO_TYPE_MASK),
                .d0 = desc->phy_info[0],
-               .info_type = IWL_RX_PHY_INFO_TYPE_NONE,
+               .d1 = desc->phy_info[1],
        };
 
        if (unlikely(iwl_rx_packet_payload_len(pkt) < sizeof(*desc)))
@@ -2015,10 +2017,6 @@ void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi,
        energy_b = (rssi & RX_NO_DATA_CHAIN_B_MSK) >> RX_NO_DATA_CHAIN_B_POS;
        channel = (rssi & RX_NO_DATA_CHANNEL_MSK) >> RX_NO_DATA_CHANNEL_POS;
 
-       phy_data.info_type =
-               le32_get_bits(desc->phy_info[1],
-                             IWL_RX_PHY_DATA1_INFO_TYPE_MASK);
-
       /* Don't use dev_alloc_skb(); we'll have enough headroom once
        * the ieee80211_hdr is pulled.
         */
index 5a0696c..0368b71 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
 /*
- * Copyright (C) 2012-2014, 2018-2020 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
  * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
  * Copyright (C) 2016-2017 Intel Deutschland GmbH
  */
@@ -2327,9 +2327,9 @@ static int iwl_mvm_scan_umac_v12(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                                             &scan_p->general_params,
                                             gen_flags);
 
-        ret = iwl_mvm_fill_scan_sched_params(params,
-                                             scan_p->periodic_params.schedule,
-                                             &scan_p->periodic_params.delay);
+       ret = iwl_mvm_fill_scan_sched_params(params,
+                                            scan_p->periodic_params.schedule,
+                                            &scan_p->periodic_params.delay);
        if (ret)
                return ret;
 
@@ -2362,9 +2362,9 @@ static int iwl_mvm_scan_umac_v14(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                                             &scan_p->general_params,
                                             gen_flags);
 
-        ret = iwl_mvm_fill_scan_sched_params(params,
-                                             scan_p->periodic_params.schedule,
-                                             &scan_p->periodic_params.delay);
+       ret = iwl_mvm_fill_scan_sched_params(params,
+                                            scan_p->periodic_params.schedule,
+                                            &scan_p->periodic_params.delay);
        if (ret)
                return ret;
 
index f618368..9c45a64 100644 (file)
@@ -3794,8 +3794,12 @@ void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm,
 
        mvm_sta->disable_tx = disable;
 
-       /* Tell mac80211 to start/stop queuing tx for this station */
-       ieee80211_sta_block_awake(mvm->hw, sta, disable);
+       /*
+        * If sta PS state is handled by mac80211, tell it to start/stop
+        * queuing tx for this station.
+        */
+       if (!ieee80211_hw_check(mvm->hw, AP_LINK_PS))
+               ieee80211_sta_block_awake(mvm->hw, sta, disable);
 
        iwl_mvm_sta_modify_disable_tx(mvm, mvm_sta, disable);
 
index 83342a6..d3307a1 100644 (file)
@@ -31,6 +31,13 @@ void iwl_mvm_te_clear_data(struct iwl_mvm *mvm,
                return;
 
        list_del(&te_data->list);
+
+       /*
+        * the list is only used for AUX ROC events so make sure it is always
+        * initialized
+        */
+       INIT_LIST_HEAD(&te_data->list);
+
        te_data->running = false;
        te_data->uid = 0;
        te_data->id = TE_MAX;
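
The added INIT_LIST_HEAD() matters because list_del() poisons the node's next/prev pointers; re-initializing leaves the node self-referencing, so a later list_empty() check or a repeated removal on the same te_data is safe. Kernel code often spells the pair as a single call; a minimal sketch of the equivalent, assuming only <linux/list.h>:

#include <linux/list.h>

static void clear_time_event(struct list_head *node)
{
        /*
         * list_del() alone would leave poisoned pointers behind;
         * list_del_init() unlinks and re-initializes in one step,
         * exactly like the list_del() + INIT_LIST_HEAD() pair above.
         */
        list_del_init(node);
}
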
@@ -310,6 +317,8 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm,
                         * and know the dtim period.
                         */
                        iwl_mvm_te_check_disconnect(mvm, te_data->vif,
+                               !te_data->vif->bss_conf.assoc ?
+                               "Not associated and the time event is over already..." :
                                "No beacon heard and the time event is over already...");
                        break;
                default:
@@ -607,14 +616,15 @@ void iwl_mvm_protect_session(struct iwl_mvm *mvm,
 }
 
 static void iwl_mvm_cancel_session_protection(struct iwl_mvm *mvm,
-                                             struct iwl_mvm_vif *mvmvif)
+                                             struct iwl_mvm_vif *mvmvif,
+                                             u32 id)
 {
        struct iwl_mvm_session_prot_cmd cmd = {
                .id_and_color =
                        cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
                                                        mvmvif->color)),
                .action = cpu_to_le32(FW_CTXT_ACTION_REMOVE),
-               .conf_id = cpu_to_le32(mvmvif->time_event_data.id),
+               .conf_id = cpu_to_le32(id),
        };
        int ret;
 
@@ -632,6 +642,12 @@ static bool __iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
 {
        u32 id;
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif);
+       enum nl80211_iftype iftype;
+
+       if (!te_data->vif)
+               return false;
+
+       iftype = te_data->vif->type;
 
        /*
         * It is possible that by the time we got to this point the time
@@ -656,8 +672,8 @@ static bool __iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
                        IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD)) {
                if (mvmvif && id < SESSION_PROTECT_CONF_MAX_ID) {
                        /* Session protection is still ongoing. Cancel it */
-                       iwl_mvm_cancel_session_protection(mvm, mvmvif);
-                       if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
+                       iwl_mvm_cancel_session_protection(mvm, mvmvif, id);
+                       if (iftype == NL80211_IFTYPE_P2P_DEVICE) {
                                set_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status);
                                iwl_mvm_roc_finished(mvm);
                        }
@@ -738,11 +754,6 @@ void iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
                IWL_ERR(mvm, "Couldn't remove the time event\n");
 }
 
-/*
- * When the firmware supports the session protection API,
- * this is not needed since it'll automatically remove the
- * session protection after association + beacon reception.
- */
 void iwl_mvm_stop_session_protection(struct iwl_mvm *mvm,
                                     struct ieee80211_vif *vif)
 {
@@ -756,7 +767,15 @@ void iwl_mvm_stop_session_protection(struct iwl_mvm *mvm,
        id = te_data->id;
        spin_unlock_bh(&mvm->time_event_lock);
 
-       if (id != TE_BSS_STA_AGGRESSIVE_ASSOC) {
+       if (fw_has_capa(&mvm->fw->ucode_capa,
+                       IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD)) {
+               if (id != SESSION_PROTECT_CONF_ASSOC) {
+                       IWL_DEBUG_TE(mvm,
+                                    "don't remove session protection id=%u\n",
+                                    id);
+                       return;
+               }
+       } else if (id != TE_BSS_STA_AGGRESSIVE_ASSOC) {
                IWL_DEBUG_TE(mvm,
                             "don't remove TE with id=%u (not session protection)\n",
                             id);
@@ -808,6 +827,8 @@ void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm,
                         * and know the dtim period.
                         */
                        iwl_mvm_te_check_disconnect(mvm, vif,
+                                                   !vif->bss_conf.assoc ?
+                                                   "Not associated and the session protection is over already..." :
                                                    "No beacon heard and the session protection is over already...");
                        spin_lock_bh(&mvm->time_event_lock);
                        iwl_mvm_te_clear_data(mvm, te_data);
@@ -981,7 +1002,8 @@ void iwl_mvm_stop_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
                mvmvif = iwl_mvm_vif_from_mac80211(vif);
 
                if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
-                       iwl_mvm_cancel_session_protection(mvm, mvmvif);
+                       iwl_mvm_cancel_session_protection(mvm, mvmvif,
+                                                         mvmvif->time_event_data.id);
                        set_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status);
                } else {
                        iwl_mvm_remove_aux_roc_te(mvm, mvmvif,
@@ -1141,6 +1163,7 @@ void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm,
 
        iwl_mvm_te_clear_data(mvm, te_data);
        te_data->duration = le32_to_cpu(cmd.duration_tu);
+       te_data->vif = vif;
        spin_unlock_bh(&mvm->time_event_lock);
 
        IWL_DEBUG_TE(mvm, "Add new session protection, duration %d TU\n",
index c566be9..4a3d297 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
 /*
- * Copyright (C) 2012-2014, 2018-2020 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
  * Copyright (C) 2013-2014 Intel Mobile Communications GmbH
  * Copyright (C) 2015-2017 Intel Deutschland GmbH
  */
@@ -238,316 +238,6 @@ u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx)
        return last_idx;
 }
 
-/*
- * Note: This structure is read from the device with IO accesses,
- * and the reading already does the endian conversion. As it is
- * read with u32-sized accesses, any members with a different size
- * need to be ordered correctly though!
- */
-struct iwl_error_event_table_v1 {
-       u32 valid;              /* (nonzero) valid, (0) log is empty */
-       u32 error_id;           /* type of error */
-       u32 pc;                 /* program counter */
-       u32 blink1;             /* branch link */
-       u32 blink2;             /* branch link */
-       u32 ilink1;             /* interrupt link */
-       u32 ilink2;             /* interrupt link */
-       u32 data1;              /* error-specific data */
-       u32 data2;              /* error-specific data */
-       u32 data3;              /* error-specific data */
-       u32 bcon_time;          /* beacon timer */
-       u32 tsf_low;            /* network timestamp function timer */
-       u32 tsf_hi;             /* network timestamp function timer */
-       u32 gp1;                /* GP1 timer register */
-       u32 gp2;                /* GP2 timer register */
-       u32 gp3;                /* GP3 timer register */
-       u32 ucode_ver;          /* uCode version */
-       u32 hw_ver;             /* HW Silicon version */
-       u32 brd_ver;            /* HW board version */
-       u32 log_pc;             /* log program counter */
-       u32 frame_ptr;          /* frame pointer */
-       u32 stack_ptr;          /* stack pointer */
-       u32 hcmd;               /* last host command header */
-       u32 isr0;               /* isr status register LMPM_NIC_ISR0:
-                                * rxtx_flag */
-       u32 isr1;               /* isr status register LMPM_NIC_ISR1:
-                                * host_flag */
-       u32 isr2;               /* isr status register LMPM_NIC_ISR2:
-                                * enc_flag */
-       u32 isr3;               /* isr status register LMPM_NIC_ISR3:
-                                * time_flag */
-       u32 isr4;               /* isr status register LMPM_NIC_ISR4:
-                                * wico interrupt */
-       u32 isr_pref;           /* isr status register LMPM_NIC_PREF_STAT */
-       u32 wait_event;         /* wait event() caller address */
-       u32 l2p_control;        /* L2pControlField */
-       u32 l2p_duration;       /* L2pDurationField */
-       u32 l2p_mhvalid;        /* L2pMhValidBits */
-       u32 l2p_addr_match;     /* L2pAddrMatchStat */
-       u32 lmpm_pmg_sel;       /* indicate which clocks are turned on
-                                * (LMPM_PMG_SEL) */
-       u32 u_timestamp;        /* indicate when the date and time of the
-                                * compilation */
-       u32 flow_handler;       /* FH read/write pointers, RX credit */
-} __packed /* LOG_ERROR_TABLE_API_S_VER_1 */;
-
-struct iwl_error_event_table {
-       u32 valid;              /* (nonzero) valid, (0) log is empty */
-       u32 error_id;           /* type of error */
-       u32 trm_hw_status0;     /* TRM HW status */
-       u32 trm_hw_status1;     /* TRM HW status */
-       u32 blink2;             /* branch link */
-       u32 ilink1;             /* interrupt link */
-       u32 ilink2;             /* interrupt link */
-       u32 data1;              /* error-specific data */
-       u32 data2;              /* error-specific data */
-       u32 data3;              /* error-specific data */
-       u32 bcon_time;          /* beacon timer */
-       u32 tsf_low;            /* network timestamp function timer */
-       u32 tsf_hi;             /* network timestamp function timer */
-       u32 gp1;                /* GP1 timer register */
-       u32 gp2;                /* GP2 timer register */
-       u32 fw_rev_type;        /* firmware revision type */
-       u32 major;              /* uCode version major */
-       u32 minor;              /* uCode version minor */
-       u32 hw_ver;             /* HW Silicon version */
-       u32 brd_ver;            /* HW board version */
-       u32 log_pc;             /* log program counter */
-       u32 frame_ptr;          /* frame pointer */
-       u32 stack_ptr;          /* stack pointer */
-       u32 hcmd;               /* last host command header */
-       u32 isr0;               /* isr status register LMPM_NIC_ISR0:
-                                * rxtx_flag */
-       u32 isr1;               /* isr status register LMPM_NIC_ISR1:
-                                * host_flag */
-       u32 isr2;               /* isr status register LMPM_NIC_ISR2:
-                                * enc_flag */
-       u32 isr3;               /* isr status register LMPM_NIC_ISR3:
-                                * time_flag */
-       u32 isr4;               /* isr status register LMPM_NIC_ISR4:
-                                * wico interrupt */
-       u32 last_cmd_id;        /* last HCMD id handled by the firmware */
-       u32 wait_event;         /* wait event() caller address */
-       u32 l2p_control;        /* L2pControlField */
-       u32 l2p_duration;       /* L2pDurationField */
-       u32 l2p_mhvalid;        /* L2pMhValidBits */
-       u32 l2p_addr_match;     /* L2pAddrMatchStat */
-       u32 lmpm_pmg_sel;       /* indicate which clocks are turned on
-                                * (LMPM_PMG_SEL) */
-       u32 u_timestamp;        /* indicate when the date and time of the
-                                * compilation */
-       u32 flow_handler;       /* FH read/write pointers, RX credit */
-} __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
-
-/*
- * UMAC error struct - relevant starting from family 8000 chip.
- * Note: This structure is read from the device with IO accesses,
- * and the reading already does the endian conversion. As it is
- * read with u32-sized accesses, any members with a different size
- * need to be ordered correctly though!
- */
-struct iwl_umac_error_event_table {
-       u32 valid;              /* (nonzero) valid, (0) log is empty */
-       u32 error_id;           /* type of error */
-       u32 blink1;             /* branch link */
-       u32 blink2;             /* branch link */
-       u32 ilink1;             /* interrupt link */
-       u32 ilink2;             /* interrupt link */
-       u32 data1;              /* error-specific data */
-       u32 data2;              /* error-specific data */
-       u32 data3;              /* error-specific data */
-       u32 umac_major;
-       u32 umac_minor;
-       u32 frame_pointer;      /* core register 27*/
-       u32 stack_pointer;      /* core register 28 */
-       u32 cmd_header;         /* latest host cmd sent to UMAC */
-       u32 nic_isr_pref;       /* ISR status register */
-} __packed;
-
-#define ERROR_START_OFFSET  (1 * sizeof(u32))
-#define ERROR_ELEM_SIZE     (7 * sizeof(u32))
-
-static void iwl_mvm_dump_umac_error_log(struct iwl_mvm *mvm)
-{
-       struct iwl_trans *trans = mvm->trans;
-       struct iwl_umac_error_event_table table = {};
-       u32 base = mvm->trans->dbg.umac_error_event_table;
-
-       if (!base &&
-           !(mvm->trans->dbg.error_event_table_tlv_status &
-             IWL_ERROR_EVENT_TABLE_UMAC))
-               return;
-
-       iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));
-
-       if (table.valid)
-               mvm->fwrt.dump.umac_err_id = table.error_id;
-
-       if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
-               IWL_ERR(trans, "Start IWL Error Log Dump:\n");
-               IWL_ERR(trans, "Status: 0x%08lX, count: %d\n",
-                       mvm->status, table.valid);
-       }
-
-       IWL_ERR(mvm, "0x%08X | %s\n", table.error_id,
-               iwl_fw_lookup_assert_desc(table.error_id));
-       IWL_ERR(mvm, "0x%08X | umac branchlink1\n", table.blink1);
-       IWL_ERR(mvm, "0x%08X | umac branchlink2\n", table.blink2);
-       IWL_ERR(mvm, "0x%08X | umac interruptlink1\n", table.ilink1);
-       IWL_ERR(mvm, "0x%08X | umac interruptlink2\n", table.ilink2);
-       IWL_ERR(mvm, "0x%08X | umac data1\n", table.data1);
-       IWL_ERR(mvm, "0x%08X | umac data2\n", table.data2);
-       IWL_ERR(mvm, "0x%08X | umac data3\n", table.data3);
-       IWL_ERR(mvm, "0x%08X | umac major\n", table.umac_major);
-       IWL_ERR(mvm, "0x%08X | umac minor\n", table.umac_minor);
-       IWL_ERR(mvm, "0x%08X | frame pointer\n", table.frame_pointer);
-       IWL_ERR(mvm, "0x%08X | stack pointer\n", table.stack_pointer);
-       IWL_ERR(mvm, "0x%08X | last host cmd\n", table.cmd_header);
-       IWL_ERR(mvm, "0x%08X | isr status reg\n", table.nic_isr_pref);
-}
-
-static void iwl_mvm_dump_lmac_error_log(struct iwl_mvm *mvm, u8 lmac_num)
-{
-       struct iwl_trans *trans = mvm->trans;
-       struct iwl_error_event_table table = {};
-       u32 val, base = mvm->trans->dbg.lmac_error_event_table[lmac_num];
-
-       if (mvm->fwrt.cur_fw_img == IWL_UCODE_INIT) {
-               if (!base)
-                       base = mvm->fw->init_errlog_ptr;
-       } else {
-               if (!base)
-                       base = mvm->fw->inst_errlog_ptr;
-       }
-
-       if (base < 0x400000) {
-               IWL_ERR(mvm,
-                       "Not valid error log pointer 0x%08X for %s uCode\n",
-                       base,
-                       (mvm->fwrt.cur_fw_img == IWL_UCODE_INIT)
-                       ? "Init" : "RT");
-               return;
-       }
-
-       /* check if there is a HW error */
-       val = iwl_trans_read_mem32(trans, base);
-       if (((val & ~0xf) == 0xa5a5a5a0) || ((val & ~0xf) == 0x5a5a5a50)) {
-               int err;
-
-               IWL_ERR(trans, "HW error, resetting before reading\n");
-
-               /* reset the device */
-               iwl_trans_sw_reset(trans);
-
-               err = iwl_finish_nic_init(trans, trans->trans_cfg);
-               if (err)
-                       return;
-       }
-
-       iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));
-
-       if (table.valid)
-               mvm->fwrt.dump.lmac_err_id[lmac_num] = table.error_id;
-
-       if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
-               IWL_ERR(trans, "Start IWL Error Log Dump:\n");
-               IWL_ERR(trans, "Status: 0x%08lX, count: %d\n",
-                       mvm->status, table.valid);
-       }
-
-       /* Do not change this output - scripts rely on it */
-
-       IWL_ERR(mvm, "Loaded firmware version: %s\n", mvm->fw->fw_version);
-
-       IWL_ERR(mvm, "0x%08X | %-28s\n", table.error_id,
-               iwl_fw_lookup_assert_desc(table.error_id));
-       IWL_ERR(mvm, "0x%08X | trm_hw_status0\n", table.trm_hw_status0);
-       IWL_ERR(mvm, "0x%08X | trm_hw_status1\n", table.trm_hw_status1);
-       IWL_ERR(mvm, "0x%08X | branchlink2\n", table.blink2);
-       IWL_ERR(mvm, "0x%08X | interruptlink1\n", table.ilink1);
-       IWL_ERR(mvm, "0x%08X | interruptlink2\n", table.ilink2);
-       IWL_ERR(mvm, "0x%08X | data1\n", table.data1);
-       IWL_ERR(mvm, "0x%08X | data2\n", table.data2);
-       IWL_ERR(mvm, "0x%08X | data3\n", table.data3);
-       IWL_ERR(mvm, "0x%08X | beacon time\n", table.bcon_time);
-       IWL_ERR(mvm, "0x%08X | tsf low\n", table.tsf_low);
-       IWL_ERR(mvm, "0x%08X | tsf hi\n", table.tsf_hi);
-       IWL_ERR(mvm, "0x%08X | time gp1\n", table.gp1);
-       IWL_ERR(mvm, "0x%08X | time gp2\n", table.gp2);
-       IWL_ERR(mvm, "0x%08X | uCode revision type\n", table.fw_rev_type);
-       IWL_ERR(mvm, "0x%08X | uCode version major\n", table.major);
-       IWL_ERR(mvm, "0x%08X | uCode version minor\n", table.minor);
-       IWL_ERR(mvm, "0x%08X | hw version\n", table.hw_ver);
-       IWL_ERR(mvm, "0x%08X | board version\n", table.brd_ver);
-       IWL_ERR(mvm, "0x%08X | hcmd\n", table.hcmd);
-       IWL_ERR(mvm, "0x%08X | isr0\n", table.isr0);
-       IWL_ERR(mvm, "0x%08X | isr1\n", table.isr1);
-       IWL_ERR(mvm, "0x%08X | isr2\n", table.isr2);
-       IWL_ERR(mvm, "0x%08X | isr3\n", table.isr3);
-       IWL_ERR(mvm, "0x%08X | isr4\n", table.isr4);
-       IWL_ERR(mvm, "0x%08X | last cmd Id\n", table.last_cmd_id);
-       IWL_ERR(mvm, "0x%08X | wait_event\n", table.wait_event);
-       IWL_ERR(mvm, "0x%08X | l2p_control\n", table.l2p_control);
-       IWL_ERR(mvm, "0x%08X | l2p_duration\n", table.l2p_duration);
-       IWL_ERR(mvm, "0x%08X | l2p_mhvalid\n", table.l2p_mhvalid);
-       IWL_ERR(mvm, "0x%08X | l2p_addr_match\n", table.l2p_addr_match);
-       IWL_ERR(mvm, "0x%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
-       IWL_ERR(mvm, "0x%08X | timestamp\n", table.u_timestamp);
-       IWL_ERR(mvm, "0x%08X | flow_handler\n", table.flow_handler);
-}
-
-static void iwl_mvm_dump_iml_error_log(struct iwl_mvm *mvm)
-{
-       struct iwl_trans *trans = mvm->trans;
-       u32 error, data1;
-
-       if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000) {
-               error = UMAG_SB_CPU_2_STATUS;
-               data1 = UMAG_SB_CPU_1_STATUS;
-       } else if (mvm->trans->trans_cfg->device_family >=
-                  IWL_DEVICE_FAMILY_8000) {
-               error = SB_CPU_2_STATUS;
-               data1 = SB_CPU_1_STATUS;
-       } else {
-               return;
-       }
-
-       error = iwl_read_umac_prph(trans, UMAG_SB_CPU_2_STATUS);
-
-       IWL_ERR(trans, "IML/ROM dump:\n");
-
-       if (error & 0xFFFF0000)
-               IWL_ERR(trans, "0x%04X | IML/ROM SYSASSERT\n", error >> 16);
-
-       IWL_ERR(mvm, "0x%08X | IML/ROM error/state\n", error);
-       IWL_ERR(mvm, "0x%08X | IML/ROM data1\n",
-               iwl_read_umac_prph(trans, data1));
-
-       if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000)
-               IWL_ERR(mvm, "0x%08X | IML/ROM WFPM_AUTH_KEY_0\n",
-                       iwl_read_umac_prph(trans, SB_MODIFY_CFG_FLAG));
-}
-
-void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
-{
-       if (!test_bit(STATUS_DEVICE_ENABLED, &mvm->trans->status)) {
-               IWL_ERR(mvm,
-                       "DEVICE_ENABLED bit is not set. Aborting dump.\n");
-               return;
-       }
-
-       iwl_mvm_dump_lmac_error_log(mvm, 0);
-
-       if (mvm->trans->dbg.lmac_error_event_table[1])
-               iwl_mvm_dump_lmac_error_log(mvm, 1);
-
-       iwl_mvm_dump_umac_error_log(mvm);
-
-       iwl_mvm_dump_iml_error_log(mvm);
-
-       iwl_fw_error_print_fseq_regs(&mvm->fwrt);
-}
-
 int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id,
                         int tid, int frame_limit, u16 ssn)
 {
@@ -621,7 +311,7 @@ void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                         enum ieee80211_smps_mode smps_request)
 {
        struct iwl_mvm_vif *mvmvif;
-       enum ieee80211_smps_mode smps_mode;
+       enum ieee80211_smps_mode smps_mode = IEEE80211_SMPS_AUTOMATIC;
        int i;
 
        lockdep_assert_held(&mvm->mutex);
@@ -630,10 +320,8 @@ void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
        if (num_of_ant(iwl_mvm_get_valid_rx_ant(mvm)) == 1)
                return;
 
-       if (vif->type == NL80211_IFTYPE_AP)
-               smps_mode = IEEE80211_SMPS_OFF;
-       else
-               smps_mode = IEEE80211_SMPS_AUTOMATIC;
+       if (vif->type != NL80211_IFTYPE_STATION)
+               return;
 
        mvmvif = iwl_mvm_vif_from_mac80211(vif);
        mvmvif->smps_requests[req_type] = smps_request;
@@ -683,23 +371,37 @@ void iwl_mvm_accu_radio_stats(struct iwl_mvm *mvm)
        mvm->accu_radio_stats.on_time_scan += mvm->radio_stats.on_time_scan;
 }
 
+struct iwl_mvm_diversity_iter_data {
+       struct iwl_mvm_phy_ctxt *ctxt;
+       bool result;
+};
+
 static void iwl_mvm_diversity_iter(void *_data, u8 *mac,
                                   struct ieee80211_vif *vif)
 {
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-       bool *result = _data;
+       struct iwl_mvm_diversity_iter_data *data = _data;
        int i;
 
+       if (mvmvif->phy_ctxt != data->ctxt)
+               return;
+
        for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++) {
                if (mvmvif->smps_requests[i] == IEEE80211_SMPS_STATIC ||
-                   mvmvif->smps_requests[i] == IEEE80211_SMPS_DYNAMIC)
-                       *result = false;
+                   mvmvif->smps_requests[i] == IEEE80211_SMPS_DYNAMIC) {
+                       data->result = false;
+                       break;
+               }
        }
 }
 
-bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm)
+bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm,
+                                 struct iwl_mvm_phy_ctxt *ctxt)
 {
-       bool result = true;
+       struct iwl_mvm_diversity_iter_data data = {
+               .ctxt = ctxt,
+               .result = true,
+       };
 
        lockdep_assert_held(&mvm->mutex);
 
@@ -711,9 +413,9 @@ bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm)
 
        ieee80211_iterate_active_interfaces_atomic(
                        mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
-                       iwl_mvm_diversity_iter, &result);
+                       iwl_mvm_diversity_iter, &data);
 
-       return result;
+       return data.result;
 }
 
 void iwl_mvm_send_low_latency_cmd(struct iwl_mvm *mvm,
@@ -1398,7 +1100,8 @@ u32 iwl_mvm_get_systime(struct iwl_mvm *mvm)
        return iwl_read_prph(mvm->trans, reg_addr);
 }
 
-void iwl_mvm_get_sync_time(struct iwl_mvm *mvm, u32 *gp2, u64 *boottime)
+void iwl_mvm_get_sync_time(struct iwl_mvm *mvm, int clock_type,
+                          u32 *gp2, u64 *boottime, ktime_t *realtime)
 {
        bool ps_disabled;
 
@@ -1412,7 +1115,11 @@ void iwl_mvm_get_sync_time(struct iwl_mvm *mvm, u32 *gp2, u64 *boottime)
        }
 
        *gp2 = iwl_mvm_get_systime(mvm);
-       *boottime = ktime_get_boottime_ns();
+
+       if (clock_type == CLOCK_BOOTTIME && boottime)
+               *boottime = ktime_get_boottime_ns();
+       else if (clock_type == CLOCK_REALTIME && realtime)
+               *realtime = ktime_get_real();
 
        if (!ps_disabled) {
                mvm->ps_disabled = ps_disabled;
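
With the reworked signature, callers select which host clock to sample alongside GP2, and may pass NULL for the output they don't need, as the checks above show. A hypothetical caller fragment (assuming an initialized struct iwl_mvm *mvm in scope; CLOCK_BOOTTIME and CLOCK_REALTIME are the standard POSIX clock ids):

u32 gp2;
u64 boottime_ns;
ktime_t realtime;

/* old behaviour: GP2 paired with the boottime clock */
iwl_mvm_get_sync_time(mvm, CLOCK_BOOTTIME, &gp2, &boottime_ns, NULL);

/* new option: GP2 paired with wall-clock time instead */
iwl_mvm_get_sync_time(mvm, CLOCK_REALTIME, &gp2, NULL, &realtime);
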
index cecc32e..239a722 100644 (file)
@@ -79,7 +79,6 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
        struct iwl_prph_scratch *prph_scratch;
        struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl;
        struct iwl_prph_info *prph_info;
-       void *iml_img;
        u32 control_flags = 0;
        int ret;
        int cmdq_size = max_t(u32, IWL_CMD_QUEUE_SIZE,
@@ -138,8 +137,15 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
 
        /* Allocate prph information
        * currently we don't assign anything to the prph info, but it would get
-        * assigned later */
-       prph_info = dma_alloc_coherent(trans->dev, sizeof(*prph_info),
+        * assigned later
+        *
+        * We also use the second half of this page to give the device some
+        * dummy TR/CR tail pointers - which shouldn't be necessary as we don't
+        * use this, but the hardware still reads/writes there and we can't let
+        * it go do that with a NULL pointer.
+        */
+       BUILD_BUG_ON(sizeof(*prph_info) > PAGE_SIZE / 2);
+       prph_info = dma_alloc_coherent(trans->dev, PAGE_SIZE,
                                       &trans_pcie->prph_info_dma_addr,
                                       GFP_KERNEL);
        if (!prph_info) {
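
The single page now serves three consumers: the prph info itself in the first half (the BUILD_BUG_ON caps its size), and the dummy TR/CR tail arrays carved out of the otherwise unused second half. The same offsets reappear in the context-info hunk below. A sketch of the layout, with illustrative macro names:

#define PRPH_INFO_OFFSET    0                   /* struct iwl_prph_info,
                                                 * <= PAGE_SIZE / 2 by the
                                                 * BUILD_BUG_ON above */
#define DUMMY_TR_TAIL_OFF   (PAGE_SIZE / 2)     /* device writes TR tails here */
#define DUMMY_CR_TAIL_OFF   (3 * PAGE_SIZE / 4) /* device writes CR tails here */
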
@@ -166,13 +172,9 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
        ctxt_info_gen3->cr_head_idx_arr_base_addr =
                cpu_to_le64(trans_pcie->rxq->rb_stts_dma);
        ctxt_info_gen3->tr_tail_idx_arr_base_addr =
-               cpu_to_le64(trans_pcie->rxq->tr_tail_dma);
+               cpu_to_le64(trans_pcie->prph_info_dma_addr + PAGE_SIZE / 2);
        ctxt_info_gen3->cr_tail_idx_arr_base_addr =
-               cpu_to_le64(trans_pcie->rxq->cr_tail_dma);
-       ctxt_info_gen3->cr_idx_arr_size =
-               cpu_to_le16(IWL_NUM_OF_COMPLETION_RINGS);
-       ctxt_info_gen3->tr_idx_arr_size =
-               cpu_to_le16(IWL_NUM_OF_TRANSFER_RINGS);
+               cpu_to_le64(trans_pcie->prph_info_dma_addr + 3 * PAGE_SIZE / 4);
        ctxt_info_gen3->mtr_base_addr =
                cpu_to_le64(trans->txqs.txq[trans->txqs.cmd.q_id]->dma_addr);
        ctxt_info_gen3->mcr_base_addr =
@@ -187,14 +189,15 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
        trans_pcie->prph_scratch = prph_scratch;
 
        /* Allocate IML */
-       iml_img = dma_alloc_coherent(trans->dev, trans->iml_len,
-                                    &trans_pcie->iml_dma_addr, GFP_KERNEL);
-       if (!iml_img) {
+       trans_pcie->iml = dma_alloc_coherent(trans->dev, trans->iml_len,
+                                            &trans_pcie->iml_dma_addr,
+                                            GFP_KERNEL);
+       if (!trans_pcie->iml) {
                ret = -ENOMEM;
                goto err_free_ctxt_info;
        }
 
-       memcpy(iml_img, trans->iml, trans->iml_len);
+       memcpy(trans_pcie->iml, trans->iml, trans->iml_len);
 
        iwl_enable_fw_load_int_ctx_info(trans);
 
@@ -216,10 +219,8 @@ err_free_ctxt_info:
                          trans_pcie->ctxt_info_dma_addr);
        trans_pcie->ctxt_info_gen3 = NULL;
 err_free_prph_info:
-       dma_free_coherent(trans->dev,
-                         sizeof(*prph_info),
-                       prph_info,
-                       trans_pcie->prph_info_dma_addr);
+       dma_free_coherent(trans->dev, PAGE_SIZE, prph_info,
+                         trans_pcie->prph_info_dma_addr);
 
 err_free_prph_scratch:
        dma_free_coherent(trans->dev,
@@ -230,29 +231,40 @@ err_free_prph_scratch:
 
 }
 
-void iwl_pcie_ctxt_info_gen3_free(struct iwl_trans *trans)
+void iwl_pcie_ctxt_info_gen3_free(struct iwl_trans *trans, bool alive)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 
+       if (trans_pcie->iml) {
+               dma_free_coherent(trans->dev, trans->iml_len, trans_pcie->iml,
+                                 trans_pcie->iml_dma_addr);
+               trans_pcie->iml_dma_addr = 0;
+               trans_pcie->iml = NULL;
+       }
+
+       iwl_pcie_ctxt_info_free_fw_img(trans);
+
+       if (alive)
+               return;
+
        if (!trans_pcie->ctxt_info_gen3)
                return;
 
+       /* ctxt_info_gen3 and prph_scratch are still needed for PNVM load */
        dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info_gen3),
                          trans_pcie->ctxt_info_gen3,
                          trans_pcie->ctxt_info_dma_addr);
        trans_pcie->ctxt_info_dma_addr = 0;
        trans_pcie->ctxt_info_gen3 = NULL;
 
-       iwl_pcie_ctxt_info_free_fw_img(trans);
-
        dma_free_coherent(trans->dev, sizeof(*trans_pcie->prph_scratch),
                          trans_pcie->prph_scratch,
                          trans_pcie->prph_scratch_dma_addr);
        trans_pcie->prph_scratch_dma_addr = 0;
        trans_pcie->prph_scratch = NULL;
 
-       dma_free_coherent(trans->dev, sizeof(*trans_pcie->prph_info),
-                         trans_pcie->prph_info,
+       /* this is needed for the entire lifetime */
+       dma_free_coherent(trans->dev, PAGE_SIZE, trans_pcie->prph_info,
                          trans_pcie->prph_info_dma_addr);
        trans_pcie->prph_info_dma_addr = 0;
        trans_pcie->prph_info = NULL;
@@ -290,3 +302,37 @@ int iwl_trans_pcie_ctx_info_gen3_set_pnvm(struct iwl_trans *trans,
 
        return 0;
 }
+
+int iwl_trans_pcie_ctx_info_gen3_set_reduce_power(struct iwl_trans *trans,
+                                                 const void *data, u32 len)
+{
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+       struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl =
+               &trans_pcie->prph_scratch->ctrl_cfg;
+       int ret;
+
+       if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
+               return 0;
+
+       /* only allocate the DRAM if not allocated yet */
+       if (!trans->reduce_power_loaded) {
+               if (WARN_ON(prph_sc_ctrl->reduce_power_cfg.size))
+                       return -EBUSY;
+
+               ret = iwl_pcie_ctxt_info_alloc_dma(trans, data, len,
+                                          &trans_pcie->reduce_power_dram);
+               if (ret < 0) {
+                       IWL_DEBUG_FW(trans,
+                                    "Failed to allocate reduce power DMA %d.\n",
+                                    ret);
+                       return ret;
+               }
+       }
+
+       prph_sc_ctrl->reduce_power_cfg.base_addr =
+               cpu_to_le64(trans_pcie->reduce_power_dram.physical);
+       prph_sc_ctrl->reduce_power_cfg.size =
+               cpu_to_le32(trans_pcie->reduce_power_dram.size);
+
+       return 0;
+}
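
iwl_trans_pcie_ctx_info_gen3_set_reduce_power() follows an allocate-once shape: the DMA block is created only on the first load (guarded by reduce_power_loaded) and merely re-pointed into the scratch area on later calls, so repeated firmware restarts neither leak nor double-allocate. A generic userspace sketch of that guard, with hypothetical names standing in for the DMA helpers:

#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

struct dma_blob { void *block; size_t size; };

static int load_once(struct dma_blob *blob, const void *data, size_t len,
                     bool *loaded)
{
        if (!*loaded) {
                blob->block = malloc(len);      /* stands in for the DMA alloc */
                if (!blob->block)
                        return -1;
                memcpy(blob->block, data, len);
                blob->size = len;
                *loaded = true;
        }
        /* (re)program the descriptor from blob->block / blob->size here */
        return 0;
}
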
index d94bd8d..16baee3 100644 (file)
@@ -532,6 +532,8 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
        IWL_DEV_INFO(0x31DC, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_name),
        IWL_DEV_INFO(0xA370, 0x1551, iwl9560_2ac_cfg_soc, iwl9560_killer_1550s_name),
        IWL_DEV_INFO(0xA370, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_name),
+       IWL_DEV_INFO(0x51F0, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550s_160_name),
+       IWL_DEV_INFO(0x51F0, 0x1551, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_160_name),
 
        IWL_DEV_INFO(0x271C, 0x0214, iwl9260_2ac_cfg, iwl9260_1_name),
 
@@ -1029,6 +1031,11 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
                      IWL_CFG_RF_TYPE_MR, IWL_CFG_ANY,
                      IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
                      iwl_cfg_ma_a0_mr_a0, iwl_ax221_name),
+       _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+                     IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY,
+                     IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY,
+                     IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+                     iwl_cfg_ma_a0_fm_a0, iwl_ax231_name),
        _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
                      IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY,
                      IWL_CFG_RF_TYPE_MR, IWL_CFG_ANY,
@@ -1209,14 +1216,14 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (cfg == &iwlax210_2ax_cfg_so_hr_a0) {
                if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_TY) {
                        iwl_trans->cfg = &iwlax210_2ax_cfg_ty_gf_a0;
-               } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) ==
-                          CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_JF)) {
+               } else if (CSR_HW_RFID_TYPE(iwl_trans->hw_rf_id) ==
+                          CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_JF)) {
                        iwl_trans->cfg = &iwlax210_2ax_cfg_so_jf_b0;
-               } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) ==
-                          CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_GF)) {
+               } else if (CSR_HW_RFID_TYPE(iwl_trans->hw_rf_id) ==
+                          CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_GF)) {
                        iwl_trans->cfg = &iwlax211_2ax_cfg_so_gf_a0;
-               } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) ==
-                          CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_GF4)) {
+               } else if (CSR_HW_RFID_TYPE(iwl_trans->hw_rf_id) ==
+                          CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_GF4)) {
                        iwl_trans->cfg = &iwlax411_2ax_cfg_so_gf4_a0;
                }
        }
index 76a512c..cc550f6 100644 (file)
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
 /*
- * Copyright (C) 2003-2015, 2018-2020 Intel Corporation
+ * Copyright (C) 2003-2015, 2018-2021 Intel Corporation
  * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
  * Copyright (C) 2016-2017 Intel Deutschland GmbH
  */
@@ -109,12 +109,8 @@ struct iwl_rx_completion_desc {
  *     Address size is 32 bit in pre-9000 devices and 64 bit in 9000 devices.
  *     In AX210 devices it is a pointer to a list of iwl_rx_transfer_desc's
  * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
- * @ubd: driver's pointer to buffer of used receive buffer descriptors (rbd)
- * @ubd_dma: physical address of buffer of used receive buffer descriptors (rbd)
- * @tr_tail: driver's pointer to the transmission ring tail buffer
- * @tr_tail_dma: physical address of the buffer for the transmission ring tail
- * @cr_tail: driver's pointer to the completion ring tail buffer
- * @cr_tail_dma: physical address of the buffer for the completion ring tail
+ * @used_bd: driver's pointer to buffer of used receive buffer descriptors (rbd)
+ * @used_bd_dma: physical address of buffer of used receive buffer descriptors (rbd)
  * @read: Shared index to newest available Rx buffer
  * @write: Shared index to oldest written Rx packet
  * @free_count: Number of pre-allocated buffers in rx_free
@@ -142,10 +138,6 @@ struct iwl_rxq {
                struct iwl_rx_completion_desc *cd;
        };
        dma_addr_t used_bd_dma;
-       __le16 *tr_tail;
-       dma_addr_t tr_tail_dma;
-       __le16 *cr_tail;
-       dma_addr_t cr_tail_dma;
        u32 read;
        u32 write;
        u32 free_count;
@@ -279,6 +271,8 @@ struct cont_rec {
  *     Context information addresses will be taken from here.
  *     This is driver's local copy for keeping track of size and
  *     count for allocating and freeing the memory.
+ * @iml: image loader image virtual address
+ * @iml_dma_addr: image loader image DMA address
  * @trans: pointer to the generic transport area
  * @scd_base_addr: scheduler sram base address in SRAM
  * @kw: keep warm address
@@ -317,6 +311,7 @@ struct cont_rec {
  * @alloc_page_lock: spinlock for the page allocator
  * @alloc_page: allocated page to still use parts of
  * @alloc_page_used: how much of the allocated page was already used (bytes)
+ * @rf_name: name/version of the CRF, if any
  */
 struct iwl_trans_pcie {
        struct iwl_rxq *rxq;
@@ -329,6 +324,7 @@ struct iwl_trans_pcie {
        };
        struct iwl_prph_info *prph_info;
        struct iwl_prph_scratch *prph_scratch;
+       void *iml;
        dma_addr_t ctxt_info_dma_addr;
        dma_addr_t prph_info_dma_addr;
        dma_addr_t prph_scratch_dma_addr;
@@ -353,6 +349,7 @@ struct iwl_trans_pcie {
        struct iwl_dma_ptr kw;
 
        struct iwl_dram_data pnvm_dram;
+       struct iwl_dram_data reduce_power_dram;
 
        struct iwl_txq *txq_memory;
 
@@ -409,6 +406,8 @@ struct iwl_trans_pcie {
        bool fw_reset_handshake;
        bool fw_reset_done;
        wait_queue_head_t fw_reset_waitq;
+
+       char rf_name[32];
 };
 
 static inline struct iwl_trans_pcie *
@@ -530,9 +529,6 @@ static inline void _iwl_disable_interrupts(struct iwl_trans *trans)
        IWL_DEBUG_ISR(trans, "Disabled interrupts\n");
 }
 
-#define IWL_NUM_OF_COMPLETION_RINGS    31
-#define IWL_NUM_OF_TRANSFER_RINGS      527
-
 static inline int iwl_pcie_get_num_sections(const struct fw_img *fw,
                                            int start)
 {
index fb84914..4f6f4b2 100644 (file)
@@ -663,7 +663,6 @@ static int iwl_pcie_free_bd_size(struct iwl_trans *trans, bool use_rx_td)
 static void iwl_pcie_free_rxq_dma(struct iwl_trans *trans,
                                  struct iwl_rxq *rxq)
 {
-       struct device *dev = trans->dev;
        bool use_rx_td = (trans->trans_cfg->device_family >=
                          IWL_DEVICE_FAMILY_AX210);
        int free_size = iwl_pcie_free_bd_size(trans, use_rx_td);
@@ -685,21 +684,6 @@ static void iwl_pcie_free_rxq_dma(struct iwl_trans *trans,
                                  rxq->used_bd, rxq->used_bd_dma);
        rxq->used_bd_dma = 0;
        rxq->used_bd = NULL;
-
-       if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
-               return;
-
-       if (rxq->tr_tail)
-               dma_free_coherent(dev, sizeof(__le16),
-                                 rxq->tr_tail, rxq->tr_tail_dma);
-       rxq->tr_tail_dma = 0;
-       rxq->tr_tail = NULL;
-
-       if (rxq->cr_tail)
-               dma_free_coherent(dev, sizeof(__le16),
-                                 rxq->cr_tail, rxq->cr_tail_dma);
-       rxq->cr_tail_dma = 0;
-       rxq->cr_tail = NULL;
 }
 
 static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,
@@ -744,21 +728,6 @@ static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,
        rxq->rb_stts_dma =
                trans_pcie->base_rb_stts_dma + rxq->id * rb_stts_size;
 
-       if (!use_rx_td)
-               return 0;
-
-       /* Allocate the driver's pointer to TR tail */
-       rxq->tr_tail = dma_alloc_coherent(dev, sizeof(__le16),
-                                         &rxq->tr_tail_dma, GFP_KERNEL);
-       if (!rxq->tr_tail)
-               goto err;
-
-       /* Allocate the driver's pointer to CR tail */
-       rxq->cr_tail = dma_alloc_coherent(dev, sizeof(__le16),
-                                         &rxq->cr_tail_dma, GFP_KERNEL);
-       if (!rxq->cr_tail)
-               goto err;
-
        return 0;
 
 err:
@@ -1590,9 +1559,6 @@ restart:
 out:
        /* Backtrack one entry */
        rxq->read = i;
-       /* update cr tail with the rxq read pointer */
-       if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
-               *rxq->cr_tail = cpu_to_le16(r);
        spin_unlock(&rxq->lock);
 
        /*
index 1bcd36e..a340093 100644 (file)
@@ -149,7 +149,7 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans)
 
        iwl_pcie_ctxt_info_free_paging(trans);
        if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
-               iwl_pcie_ctxt_info_gen3_free(trans);
+               iwl_pcie_ctxt_info_gen3_free(trans, false);
        else
                iwl_pcie_ctxt_info_free(trans);
 
@@ -240,6 +240,75 @@ static int iwl_pcie_gen2_nic_init(struct iwl_trans *trans)
        return 0;
 }
 
+static void iwl_pcie_get_rf_name(struct iwl_trans *trans)
+{
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+       char *buf = trans_pcie->rf_name;
+       size_t buflen = sizeof(trans_pcie->rf_name);
+       size_t pos;
+       u32 version;
+
+       if (buf[0])
+               return;
+
+       switch (CSR_HW_RFID_TYPE(trans->hw_rf_id)) {
+       case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_JF):
+               pos = scnprintf(buf, buflen, "JF");
+               break;
+       case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_GF):
+               pos = scnprintf(buf, buflen, "GF");
+               break;
+       case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_GF4):
+               pos = scnprintf(buf, buflen, "GF4");
+               break;
+       case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_HR):
+               pos = scnprintf(buf, buflen, "HR");
+               break;
+       case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_HR1):
+               pos = scnprintf(buf, buflen, "HR1");
+               break;
+       case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_HRCDB):
+               pos = scnprintf(buf, buflen, "HRCDB");
+               break;
+       default:
+               return;
+       }
+
+       switch (CSR_HW_RFID_TYPE(trans->hw_rf_id)) {
+       case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_HR):
+       case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_HR1):
+       case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_HRCDB):
+               version = iwl_read_prph(trans, CNVI_MBOX_C);
+               switch (version) {
+               case 0x20000:
+                       pos += scnprintf(buf + pos, buflen - pos, " B3");
+                       break;
+               case 0x120000:
+                       pos += scnprintf(buf + pos, buflen - pos, " B5");
+                       break;
+               default:
+                       pos += scnprintf(buf + pos, buflen - pos,
+                                        " (0x%x)", version);
+                       break;
+               }
+               break;
+       default:
+               break;
+       }
+
+       pos += scnprintf(buf + pos, buflen - pos, ", rfid=0x%x",
+                        trans->hw_rf_id);
+
+       IWL_INFO(trans, "Detected RF %s\n", buf);
+
+       /*
+        * also add a \n for debugfs - need to do it after printing
+        * since our IWL_INFO machinery wants to see a static \n at
+        * the end of the string
+        */
+       pos += scnprintf(buf + pos, buflen - pos, "\n");
+}
+
 void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
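
iwl_pcie_get_rf_name() leans on a property worth calling out: unlike snprintf(), scnprintf() returns the number of characters actually written, so the "buf + pos, buflen - pos" chaining above can never run past the buffer even when it truncates. A minimal sketch of the same accumulation (the version and rfid values are illustrative):

#include <linux/kernel.h>       /* scnprintf() */

static size_t build_rf_name(char *buf, size_t buflen)
{
        size_t pos;

        pos = scnprintf(buf, buflen, "GF");
        pos += scnprintf(buf + pos, buflen - pos, " (0x%x)", 0x120000);
        pos += scnprintf(buf + pos, buflen - pos, ", rfid=0x%x", 0x2f7d10);
        pos += scnprintf(buf + pos, buflen - pos, "\n");
        return pos;     /* total written, always < buflen */
}
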
@@ -254,7 +323,10 @@ void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr)
       /* now that we got the alive notification, we can free the fw image &
        * the context info. paging memory cannot be freed, though, since FW
        * will still use it
         */
-       iwl_pcie_ctxt_info_free(trans);
+       if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+               iwl_pcie_ctxt_info_gen3_free(trans, true);
+       else
+               iwl_pcie_ctxt_info_free(trans);
 
        /*
         * Re-enable all the interrupts, including the RF-Kill one, now that
@@ -263,6 +335,8 @@ void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr)
        iwl_enable_interrupts(trans);
        mutex_lock(&trans_pcie->mutex);
        iwl_pcie_check_hw_rf_kill(trans);
+
+       iwl_pcie_get_rf_name(trans);
        mutex_unlock(&trans_pcie->mutex);
 }
 
index 239bc17..bee6b45 100644 (file)
@@ -1648,7 +1648,7 @@ static void iwl_pcie_irq_set_affinity(struct iwl_trans *trans)
                if (ret)
                        IWL_ERR(trans_pcie->trans,
                                "Failed to set affinity mask for IRQ %d\n",
-                               i);
+                               trans_pcie->msix_entries[i].vector);
        }
 }
 
@@ -1943,6 +1943,12 @@ void iwl_trans_pcie_free(struct iwl_trans *trans)
                                  trans_pcie->pnvm_dram.block,
                                  trans_pcie->pnvm_dram.physical);
 
+       if (trans_pcie->reduce_power_dram.size)
+               dma_free_coherent(trans->dev,
+                                 trans_pcie->reduce_power_dram.size,
+                                 trans_pcie->reduce_power_dram.block,
+                                 trans_pcie->reduce_power_dram.physical);
+
        mutex_destroy(&trans_pcie->mutex);
        iwl_trans_free(trans);
 }
@@ -2848,11 +2854,28 @@ static ssize_t iwl_dbgfs_monitor_data_read(struct file *file,
        return bytes_copied;
 }
 
+static ssize_t iwl_dbgfs_rf_read(struct file *file,
+                                char __user *user_buf,
+                                size_t count, loff_t *ppos)
+{
+       struct iwl_trans *trans = file->private_data;
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+       if (!trans_pcie->rf_name[0])
+               return -ENODEV;
+
+       return simple_read_from_buffer(user_buf, count, ppos,
+                                      trans_pcie->rf_name,
+                                      strlen(trans_pcie->rf_name));
+}
+
 DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
 DEBUGFS_READ_FILE_OPS(fh_reg);
 DEBUGFS_READ_FILE_OPS(rx_queue);
 DEBUGFS_WRITE_FILE_OPS(csr);
 DEBUGFS_READ_WRITE_FILE_OPS(rfkill);
+DEBUGFS_READ_FILE_OPS(rf);
+
 static const struct file_operations iwl_dbgfs_tx_queue_ops = {
        .owner = THIS_MODULE,
        .open = iwl_dbgfs_tx_queue_open,
@@ -2879,6 +2902,7 @@ void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans)
        DEBUGFS_ADD_FILE(fh_reg, dir, 0400);
        DEBUGFS_ADD_FILE(rfkill, dir, 0600);
        DEBUGFS_ADD_FILE(monitor_data, dir, 0400);
+       DEBUGFS_ADD_FILE(rf, dir, 0400);
 }
 
 static void iwl_trans_pcie_debugfs_cleanup(struct iwl_trans *trans)
@@ -3400,6 +3424,7 @@ static const struct iwl_trans_ops trans_ops_pcie_gen2 = {
        .wait_txq_empty = iwl_trans_pcie_wait_txq_empty,
        .rxq_dma_data = iwl_trans_pcie_rxq_dma_data,
        .set_pnvm = iwl_trans_pcie_ctx_info_gen3_set_pnvm,
+       .set_reduce_power = iwl_trans_pcie_ctx_info_gen3_set_reduce_power,
 #ifdef CONFIG_IWLWIFI_DEBUGFS
        .debugfs_cleanup = iwl_trans_pcie_debugfs_cleanup,
 #endif
@@ -3413,6 +3438,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
        struct iwl_trans *trans;
        int ret, addr_size;
        const struct iwl_trans_ops *ops = &trans_ops_pcie_gen2;
+       void __iomem * const *table;
 
        if (!cfg_trans->gen2)
                ops = &trans_ops_pcie;
@@ -3485,9 +3511,16 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
                goto out_no_pci;
        }
 
-       trans_pcie->hw_base = pcim_iomap_table(pdev)[0];
-       if (!trans_pcie->hw_base) {
+       table = pcim_iomap_table(pdev);
+       if (!table) {
                dev_err(&pdev->dev, "pcim_iomap_table failed\n");
+               ret = -ENOMEM;
+               goto out_no_pci;
+       }
+
+       trans_pcie->hw_base = table[0];
+       if (!trans_pcie->hw_base) {
+               dev_err(&pdev->dev, "couldn't find IO mem in first BAR\n");
                ret = -ENODEV;
                goto out_no_pci;
        }
index 2c7adb4..0aea35c 100644 (file)
@@ -988,15 +988,18 @@ int __orinoco_hw_setup_enc(struct orinoco_private *priv)
  * tsc must be NULL or up to 8 bytes
  */
 int __orinoco_hw_set_tkip_key(struct orinoco_private *priv, int key_idx,
-                             int set_tx, const u8 *key, const u8 *rsc,
-                             size_t rsc_len, const u8 *tsc, size_t tsc_len)
+                             int set_tx, const u8 *key, size_t key_len,
+                             const u8 *rsc, size_t rsc_len,
+                             const u8 *tsc, size_t tsc_len)
 {
        struct {
                __le16 idx;
                u8 rsc[ORINOCO_SEQ_LEN];
-               u8 key[TKIP_KEYLEN];
-               u8 tx_mic[MIC_KEYLEN];
-               u8 rx_mic[MIC_KEYLEN];
+               struct {
+                       u8 key[TKIP_KEYLEN];
+                       u8 tx_mic[MIC_KEYLEN];
+                       u8 rx_mic[MIC_KEYLEN];
+               } tkip;
                u8 tsc[ORINOCO_SEQ_LEN];
        } __packed buf;
        struct hermes *hw = &priv->hw;
@@ -1011,8 +1014,9 @@ int __orinoco_hw_set_tkip_key(struct orinoco_private *priv, int key_idx,
                key_idx |= 0x8000;
 
        buf.idx = cpu_to_le16(key_idx);
-       memcpy(buf.key, key,
-              sizeof(buf.key) + sizeof(buf.tx_mic) + sizeof(buf.rx_mic));
+       if (key_len != sizeof(buf.tkip))
+               return -EINVAL;
+       memcpy(&buf.tkip, key, sizeof(buf.tkip));
 
        if (rsc_len > sizeof(buf.rsc))
                rsc_len = sizeof(buf.rsc);
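The nested struct makes the memcpy bound match the copied object instead of reaching across three adjacent fields, and the new key_len check rejects malformed input before the copy. A standalone model of the refactor (simplified types; lengths as used above):

#include <stdio.h>
#include <string.h>

#define TKIP_KEYLEN 16
#define MIC_KEYLEN  8

struct tkip_buf {
	struct {
		unsigned char key[TKIP_KEYLEN];
		unsigned char tx_mic[MIC_KEYLEN];
		unsigned char rx_mic[MIC_KEYLEN];
	} tkip;
};

static int set_tkip(struct tkip_buf *buf, const unsigned char *key,
		    size_t key_len)
{
	if (key_len != sizeof(buf->tkip))
		return -1;	/* reject malformed key material */
	memcpy(&buf->tkip, key, sizeof(buf->tkip));	/* bound == object size */
	return 0;
}

int main(void)
{
	struct tkip_buf buf;
	unsigned char key[TKIP_KEYLEN + 2 * MIC_KEYLEN] = { 0 };

	printf("%d\n", set_tkip(&buf, key, sizeof(key)));	/* 0: accepted */
	printf("%d\n", set_tkip(&buf, key, 4));			/* -1: rejected */
	return 0;
}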
index 466d1ed..da5804d 100644 (file)
@@ -38,8 +38,9 @@ int __orinoco_hw_set_wap(struct orinoco_private *priv);
 int __orinoco_hw_setup_wepkeys(struct orinoco_private *priv);
 int __orinoco_hw_setup_enc(struct orinoco_private *priv);
 int __orinoco_hw_set_tkip_key(struct orinoco_private *priv, int key_idx,
-                             int set_tx, const u8 *key, const u8 *rsc,
-                             size_t rsc_len, const u8 *tsc, size_t tsc_len);
+                             int set_tx, const u8 *key, size_t key_len,
+                             const u8 *rsc, size_t rsc_len,
+                             const u8 *tsc, size_t tsc_len);
 int orinoco_clear_tkip_key(struct orinoco_private *priv, int key_idx);
 int __orinoco_hw_set_multicast_list(struct orinoco_private *priv,
                                    struct net_device *dev,
index 7b6c4ae..4a01260 100644 (file)
@@ -791,7 +791,7 @@ static int orinoco_ioctl_set_encodeext(struct net_device *dev,
 
                        err = __orinoco_hw_set_tkip_key(priv, idx,
                                 ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY,
-                                priv->keys[idx].key,
+                                priv->keys[idx].key, priv->keys[idx].key_len,
                                 tkip_iv, ORINOCO_SEQ_LEN, NULL, 0);
                        if (err)
                                printk(KERN_ERR "%s: Error %d setting TKIP key"
index 470d669..2ff23ab 100644 (file)
@@ -995,6 +995,11 @@ struct host_cmd_ds_802_11_key_material {
        struct mwifiex_ie_type_key_param_set key_param_set;
 } __packed;
 
+struct host_cmd_ds_802_11_key_material_wep {
+       __le16 action;
+       struct mwifiex_ie_type_key_param_set key_param_set[NUM_WEP_KEYS];
+} __packed;
+
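The dedicated WEP layout lets the handler size its memset and command length through sizeof on one object rather than multiplying NUM_WEP_KEYS by an element size by hand, as the v1 key-material handler further below now does. A compact userspace model with simplified field types:

#include <stdio.h>
#include <string.h>

#define NUM_WEP_KEYS 4	/* value assumed for illustration */

struct key_param_set {
	unsigned char data[8];	/* simplified stand-in for the TLV */
};

struct key_material_wep {
	unsigned short action;
	struct key_param_set key_param_set[NUM_WEP_KEYS];
};

int main(void)
{
	struct key_material_wep wep;

	/* one sizeof covers the whole array: no hand-multiplied length */
	memset(wep.key_param_set, 0, sizeof(wep.key_param_set));
	printf("cleared %zu bytes\n", sizeof(wep.key_param_set));
	return 0;
}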
 struct host_cmd_ds_gen {
        __le16 command;
        __le16 size;
@@ -2347,6 +2352,7 @@ struct host_cmd_ds_command {
                struct host_cmd_ds_wmm_get_status get_wmm_status;
                struct host_cmd_ds_802_11_key_material key_material;
                struct host_cmd_ds_802_11_key_material_v2 key_material_v2;
+               struct host_cmd_ds_802_11_key_material_wep key_material_wep;
                struct host_cmd_ds_version_ext verext;
                struct host_cmd_ds_mgmt_frame_reg reg_mask;
                struct host_cmd_ds_remain_on_chan roc_cfg;
index d3a968e..48ea00d 100644 (file)
@@ -840,14 +840,15 @@ mwifiex_cmd_802_11_key_material_v1(struct mwifiex_private *priv,
        }
 
        if (!enc_key) {
-               memset(&key_material->key_param_set, 0,
-                      (NUM_WEP_KEYS *
-                       sizeof(struct mwifiex_ie_type_key_param_set)));
+               struct host_cmd_ds_802_11_key_material_wep *key_material_wep =
+                       (struct host_cmd_ds_802_11_key_material_wep *)key_material;
+               memset(key_material_wep->key_param_set, 0,
+                      sizeof(key_material_wep->key_param_set));
                ret = mwifiex_set_keyparamset_wep(priv,
-                                                 &key_material->key_param_set,
+                                                 &key_material_wep->key_param_set[0],
                                                  &key_param_len);
                cmd->size = cpu_to_le16(key_param_len +
-                                   sizeof(key_material->action) + S_DS_GEN);
+                                   sizeof(key_material_wep->action) + S_DS_GEN);
                return ret;
        } else
                memset(&key_material->key_param_set, 0,
index 84b32a5..3bf6571 100644 (file)
@@ -4552,7 +4552,7 @@ static int mwl8k_cmd_update_stadb_add(struct ieee80211_hw *hw,
        else
                rates = sta->supp_rates[NL80211_BAND_5GHZ] << 5;
        legacy_rate_mask_to_array(p->legacy_rates, rates);
-       memcpy(p->ht_rates, sta->ht_cap.mcs.rx_mask, 16);
+       memcpy(p->ht_rates, &sta->ht_cap.mcs, 16);
        p->interop = 1;
        p->amsdu_enabled = 0;
 
@@ -5034,7 +5034,7 @@ mwl8k_bss_info_changed_sta(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
                        ap_legacy_rates =
                                ap->supp_rates[NL80211_BAND_5GHZ] << 5;
                }
-               memcpy(ap_mcs_rates, ap->ht_cap.mcs.rx_mask, 16);
+               memcpy(ap_mcs_rates, &ap->ht_cap.mcs, 16);
 
                rcu_read_unlock();
 
index 72b1cc0..5e1c150 100644 (file)
@@ -191,6 +191,7 @@ mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
 
        q->entry[idx].txwi = txwi;
        q->entry[idx].skb = skb;
+       q->entry[idx].wcid = 0xffff;
 
        return idx;
 }
@@ -349,6 +350,9 @@ mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
                      struct sk_buff *skb, struct mt76_wcid *wcid,
                      struct ieee80211_sta *sta)
 {
+       struct ieee80211_tx_status status = {
+               .sta = sta,
+       };
        struct mt76_tx_info tx_info = {
                .skb = skb,
        };
@@ -360,11 +364,9 @@ mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
        u8 *txwi;
 
        t = mt76_get_txwi(dev);
-       if (!t) {
-               hw = mt76_tx_status_get_hw(dev, skb);
-               ieee80211_free_txskb(hw, skb);
-               return -ENOMEM;
-       }
+       if (!t)
+               goto free_skb;
+
        txwi = mt76_get_txwi_ptr(dev, t);
 
        skb->prev = skb->next = NULL;
@@ -427,8 +429,13 @@ free:
        }
 #endif
 
-       dev_kfree_skb(tx_info.skb);
        mt76_put_txwi(dev, t);
+
+free_skb:
+       status.skb = tx_info.skb;
+       hw = mt76_tx_status_get_hw(dev, tx_info.skb);
+       ieee80211_tx_status_ext(hw, &status);
+
        return ret;
 }
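Both failure points in mt76_dma_tx_queue_skb() now funnel into a single exit that reports the frame through ieee80211_tx_status_ext(), so mac80211 always sees a status for the skb instead of it being freed behind the stack's back. A plain-C sketch of that single-exit shape (illustrative, not the driver's code):

#include <stdio.h>

static void report_status(const char *skb)
{
	printf("status reported for %s\n", skb);	/* stand-in for ieee80211_tx_status_ext() */
}

static int queue_skb(const char *skb, int have_txwi, int dma_ok)
{
	int ret = -1;

	if (!have_txwi)
		goto free_skb;	/* no tx descriptor available */

	if (!dma_ok)
		goto free_skb;	/* later failure: same single exit */

	printf("queued %s\n", skb);
	return 0;

free_skb:
	report_status(skb);	/* the stack always learns the frame's fate */
	return ret;
}

int main(void)
{
	queue_skb("frame0", 0, 1);
	queue_skb("frame1", 1, 0);
	queue_skb("frame2", 1, 1);
	return 0;
}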
 
index 03fe628..d03aedc 100644 (file)
@@ -83,6 +83,22 @@ static const struct ieee80211_tpt_blink mt76_tpt_blink[] = {
        { .throughput = 300 * 1024, .blink_time =  50 },
 };
 
+struct ieee80211_rate mt76_rates[] = {
+       CCK_RATE(0, 10),
+       CCK_RATE(1, 20),
+       CCK_RATE(2, 55),
+       CCK_RATE(3, 110),
+       OFDM_RATE(11, 60),
+       OFDM_RATE(15, 90),
+       OFDM_RATE(10, 120),
+       OFDM_RATE(14, 180),
+       OFDM_RATE(9,  240),
+       OFDM_RATE(13, 360),
+       OFDM_RATE(8,  480),
+       OFDM_RATE(12, 540),
+};
+EXPORT_SYMBOL_GPL(mt76_rates);
+
 static int mt76_led_init(struct mt76_dev *dev)
 {
        struct device_node *np = dev->dev->of_node;
@@ -315,17 +331,6 @@ mt76_phy_init(struct mt76_phy *phy, struct ieee80211_hw *hw)
        ieee80211_hw_set(hw, MFP_CAPABLE);
        ieee80211_hw_set(hw, AP_LINK_PS);
        ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
-
-       wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
-       wiphy->interface_modes =
-               BIT(NL80211_IFTYPE_STATION) |
-               BIT(NL80211_IFTYPE_AP) |
-#ifdef CONFIG_MAC80211_MESH
-               BIT(NL80211_IFTYPE_MESH_POINT) |
-#endif
-               BIT(NL80211_IFTYPE_P2P_CLIENT) |
-               BIT(NL80211_IFTYPE_P2P_GO) |
-               BIT(NL80211_IFTYPE_ADHOC);
 }
 
 struct mt76_phy *
@@ -346,6 +351,17 @@ mt76_alloc_phy(struct mt76_dev *dev, unsigned int size,
        phy->hw = hw;
        phy->priv = hw->priv + phy_size;
 
+       hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
+       hw->wiphy->interface_modes =
+               BIT(NL80211_IFTYPE_STATION) |
+               BIT(NL80211_IFTYPE_AP) |
+#ifdef CONFIG_MAC80211_MESH
+               BIT(NL80211_IFTYPE_MESH_POINT) |
+#endif
+               BIT(NL80211_IFTYPE_P2P_CLIENT) |
+               BIT(NL80211_IFTYPE_P2P_GO) |
+               BIT(NL80211_IFTYPE_ADHOC);
+
        return phy;
 }
 EXPORT_SYMBOL_GPL(mt76_alloc_phy);
@@ -428,6 +444,17 @@ mt76_alloc_device(struct device *pdev, unsigned int size,
        mutex_init(&dev->mcu.mutex);
        dev->tx_worker.fn = mt76_tx_worker;
 
+       hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
+       hw->wiphy->interface_modes =
+               BIT(NL80211_IFTYPE_STATION) |
+               BIT(NL80211_IFTYPE_AP) |
+#ifdef CONFIG_MAC80211_MESH
+               BIT(NL80211_IFTYPE_MESH_POINT) |
+#endif
+               BIT(NL80211_IFTYPE_P2P_CLIENT) |
+               BIT(NL80211_IFTYPE_P2P_GO) |
+               BIT(NL80211_IFTYPE_ADHOC);
+
        spin_lock_init(&dev->token_lock);
        idr_init(&dev->token);
 
@@ -632,20 +659,19 @@ void mt76_update_survey_active_time(struct mt76_phy *phy, ktime_t time)
 }
 EXPORT_SYMBOL_GPL(mt76_update_survey_active_time);
 
-void mt76_update_survey(struct mt76_dev *dev)
+void mt76_update_survey(struct mt76_phy *phy)
 {
+       struct mt76_dev *dev = phy->dev;
        ktime_t cur_time;
 
        if (dev->drv->update_survey)
-               dev->drv->update_survey(dev);
+               dev->drv->update_survey(phy);
 
        cur_time = ktime_get_boottime();
-       mt76_update_survey_active_time(&dev->phy, cur_time);
-       if (dev->phy2)
-               mt76_update_survey_active_time(dev->phy2, cur_time);
+       mt76_update_survey_active_time(phy, cur_time);
 
        if (dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME) {
-               struct mt76_channel_state *state = dev->phy.chan_state;
+               struct mt76_channel_state *state = phy->chan_state;
 
                spin_lock_bh(&dev->cc_lock);
                state->cc_bss_rx += dev->cur_cc_bss_rx;
@@ -664,7 +690,7 @@ void mt76_set_channel(struct mt76_phy *phy)
        int timeout = HZ / 5;
 
        wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(phy), timeout);
-       mt76_update_survey(dev);
+       mt76_update_survey(phy);
 
        phy->chandef = *chandef;
        phy->chan_state = mt76_channel_state(phy, chandef->chan);
@@ -689,7 +715,7 @@ int mt76_get_survey(struct ieee80211_hw *hw, int idx,
 
        mutex_lock(&dev->mutex);
        if (idx == 0 && dev->drv->update_survey)
-               mt76_update_survey(dev);
+               mt76_update_survey(phy);
 
        sband = &phy->sband_2g;
        if (idx >= sband->sband.n_channels) {
index 36ede65..25c5cee 100644 (file)
@@ -87,6 +87,22 @@ enum mt76_rxq_id {
        __MT_RXQ_MAX
 };
 
+enum mt76_cipher_type {
+       MT_CIPHER_NONE,
+       MT_CIPHER_WEP40,
+       MT_CIPHER_TKIP,
+       MT_CIPHER_TKIP_NO_MIC,
+       MT_CIPHER_AES_CCMP,
+       MT_CIPHER_WEP104,
+       MT_CIPHER_BIP_CMAC_128,
+       MT_CIPHER_WEP128,
+       MT_CIPHER_WAPI,
+       MT_CIPHER_CCMP_CCX,
+       MT_CIPHER_CCMP_256,
+       MT_CIPHER_GCMP,
+       MT_CIPHER_GCMP_256,
+};
+
 struct mt76_queue_buf {
        dma_addr_t addr;
        u16 len;
@@ -320,6 +336,7 @@ enum {
 struct mt76_hw_cap {
        bool has_2ghz;
        bool has_5ghz;
+       bool has_6ghz;
 };
 
 #define MT_DRV_TXWI_NO_FREE            BIT(0)
@@ -336,7 +353,7 @@ struct mt76_driver_ops {
        u16 token_size;
        u8 mcs_rates;
 
-       void (*update_survey)(struct mt76_dev *dev);
+       void (*update_survey)(struct mt76_phy *phy);
 
        int (*tx_prepare_skb)(struct mt76_dev *dev, void *txwi_ptr,
                              enum mt76_txq_id qid, struct mt76_wcid *wcid,
@@ -738,6 +755,21 @@ enum mt76_phy_type {
        MT_PHY_TYPE_HE_MU,
 };
 
+#define CCK_RATE(_idx, _rate) {                                        \
+       .bitrate = _rate,                                       \
+       .flags = IEEE80211_RATE_SHORT_PREAMBLE,                 \
+       .hw_value = (MT_PHY_TYPE_CCK << 8) | (_idx),            \
+       .hw_value_short = (MT_PHY_TYPE_CCK << 8) | (4 + _idx),  \
+}
+
+#define OFDM_RATE(_idx, _rate) {                               \
+       .bitrate = _rate,                                       \
+       .hw_value = (MT_PHY_TYPE_OFDM << 8) | (_idx),           \
+       .hw_value_short = (MT_PHY_TYPE_OFDM << 8) | (_idx),     \
+}
+
+extern struct ieee80211_rate mt76_rates[12];
+
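As the now-shared CCK_RATE()/OFDM_RATE() macros encode it, hw_value packs the PHY type in the high byte and the hardware rate index in the low byte, with CCK short-preamble entries offsetting the index by 4. A standalone decoder, with assumed MT_PHY_TYPE_* values:

#include <stdio.h>

enum { PHY_TYPE_CCK = 0, PHY_TYPE_OFDM = 1 };	/* assumed values */

static unsigned int hw_phy(unsigned short v) { return v >> 8; }
static unsigned int hw_idx(unsigned short v) { return v & 0xff; }

int main(void)
{
	unsigned short ofdm = (PHY_TYPE_OFDM << 8) | 11;	/* OFDM_RATE(11, 60) */
	unsigned short cck_sp = (PHY_TYPE_CCK << 8) | (4 + 0);	/* CCK idx 0, short preamble */

	printf("phy=%u idx=%u\n", hw_phy(ofdm), hw_idx(ofdm));		/* phy=1 idx=11 */
	printf("phy=%u idx=%u\n", hw_phy(cck_sp), hw_idx(cck_sp));	/* phy=0 idx=4 */
	return 0;
}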
 #define __mt76_rr(dev, ...)    (dev)->bus->rr((dev), __VA_ARGS__)
 #define __mt76_wr(dev, ...)    (dev)->bus->wr((dev), __VA_ARGS__)
 #define __mt76_rmw(dev, ...)   (dev)->bus->rmw((dev), __VA_ARGS__)
@@ -1031,7 +1063,7 @@ void mt76_release_buffered_frames(struct ieee80211_hw *hw,
                                  bool more_data);
 bool mt76_has_tx_pending(struct mt76_phy *phy);
 void mt76_set_channel(struct mt76_phy *phy);
-void mt76_update_survey(struct mt76_dev *dev);
+void mt76_update_survey(struct mt76_phy *phy);
 void mt76_update_survey_active_time(struct mt76_phy *phy, ktime_t time);
 int mt76_get_survey(struct ieee80211_hw *hw, int idx,
                    struct survey_info *survey);
@@ -1056,7 +1088,14 @@ struct sk_buff *mt76_tx_status_skb_get(struct mt76_dev *dev,
                                       struct sk_buff_head *list);
 void mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb,
                             struct sk_buff_head *list);
-void mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid, struct sk_buff *skb);
+void __mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid, struct sk_buff *skb,
+                           struct list_head *free_list);
+static inline void
+mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid, struct sk_buff *skb)
+{
+       __mt76_tx_complete_skb(dev, wcid, skb, NULL);
+}
+
 void mt76_tx_status_check(struct mt76_dev *dev, struct mt76_wcid *wcid,
                          bool flush);
 int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
@@ -1253,4 +1292,15 @@ mt76_token_put(struct mt76_dev *dev, int token)
 
        return txwi;
 }
+
+static inline int
+mt76_get_next_pkt_id(struct mt76_wcid *wcid)
+{
+       wcid->packet_id = (wcid->packet_id + 1) & MT_PACKET_ID_MASK;
+       if (wcid->packet_id == MT_PACKET_ID_NO_ACK ||
+           wcid->packet_id == MT_PACKET_ID_NO_SKB)
+               wcid->packet_id = MT_PACKET_ID_FIRST;
+
+       return wcid->packet_id;
+}
 #endif
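mt76_get_next_pkt_id() is a wrapping allocator that steps around the two reserved packet-id values. The same logic in a standalone program, with illustrative constants standing in for the MT_PACKET_ID_* definitions:

#include <stdio.h>

#define PKT_ID_MASK   0x7fu
#define PKT_ID_NO_ACK 0x00u
#define PKT_ID_NO_SKB 0x01u
#define PKT_ID_FIRST  0x02u

static unsigned int next_pkt_id(unsigned int *pid)
{
	*pid = (*pid + 1) & PKT_ID_MASK;
	if (*pid == PKT_ID_NO_ACK || *pid == PKT_ID_NO_SKB)
		*pid = PKT_ID_FIRST;	/* skip the reserved values */
	return *pid;
}

int main(void)
{
	unsigned int pid = PKT_ID_MASK;	/* force a wrap on the first call */

	printf("%u\n", next_pkt_id(&pid));	/* 2: wrapped past reserved ids */
	printf("%u\n", next_pkt_id(&pid));	/* 3 */
	return 0;
}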
index e1b2cfa..031d39a 100644 (file)
@@ -304,34 +304,6 @@ mt7603_init_hardware(struct mt7603_dev *dev)
        return 0;
 }
 
-#define CCK_RATE(_idx, _rate) {                                        \
-       .bitrate = _rate,                                       \
-       .flags = IEEE80211_RATE_SHORT_PREAMBLE,                 \
-       .hw_value = (MT_PHY_TYPE_CCK << 8) | (_idx),            \
-       .hw_value_short = (MT_PHY_TYPE_CCK << 8) | (4 + _idx),  \
-}
-
-#define OFDM_RATE(_idx, _rate) {                               \
-       .bitrate = _rate,                                       \
-       .hw_value = (MT_PHY_TYPE_OFDM << 8) | (_idx),           \
-       .hw_value_short = (MT_PHY_TYPE_OFDM << 8) | (_idx),     \
-}
-
-static struct ieee80211_rate mt7603_rates[] = {
-       CCK_RATE(0, 10),
-       CCK_RATE(1, 20),
-       CCK_RATE(2, 55),
-       CCK_RATE(3, 110),
-       OFDM_RATE(11, 60),
-       OFDM_RATE(15, 90),
-       OFDM_RATE(10, 120),
-       OFDM_RATE(14, 180),
-       OFDM_RATE(9,  240),
-       OFDM_RATE(13, 360),
-       OFDM_RATE(8,  480),
-       OFDM_RATE(12, 540),
-};
-
 static const struct ieee80211_iface_limit if_limits[] = {
        {
                .max = 1,
@@ -569,8 +541,8 @@ int mt7603_register_device(struct mt7603_dev *dev)
 
        wiphy->reg_notifier = mt7603_regd_notifier;
 
-       ret = mt76_register_device(&dev->mt76, true, mt7603_rates,
-                                  ARRAY_SIZE(mt7603_rates));
+       ret = mt76_register_device(&dev->mt76, true, mt76_rates,
+                                  ARRAY_SIZE(mt76_rates));
        if (ret)
                return ret;
 
index fbceb07..3972c56 100644 (file)
@@ -550,14 +550,27 @@ mt7603_mac_fill_rx(struct mt7603_dev *dev, struct sk_buff *skb)
                u8 *data = (u8 *)rxd;
 
                if (status->flag & RX_FLAG_DECRYPTED) {
-                       status->iv[0] = data[5];
-                       status->iv[1] = data[4];
-                       status->iv[2] = data[3];
-                       status->iv[3] = data[2];
-                       status->iv[4] = data[1];
-                       status->iv[5] = data[0];
-
-                       insert_ccmp_hdr = FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
+                       switch (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2)) {
+                       case MT_CIPHER_AES_CCMP:
+                       case MT_CIPHER_CCMP_CCX:
+                       case MT_CIPHER_CCMP_256:
+                               insert_ccmp_hdr =
+                                       FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
+                               fallthrough;
+                       case MT_CIPHER_TKIP:
+                       case MT_CIPHER_TKIP_NO_MIC:
+                       case MT_CIPHER_GCMP:
+                       case MT_CIPHER_GCMP_256:
+                               status->iv[0] = data[5];
+                               status->iv[1] = data[4];
+                               status->iv[2] = data[3];
+                               status->iv[3] = data[2];
+                               status->iv[4] = data[1];
+                               status->iv[5] = data[0];
+                               break;
+                       default:
+                               break;
+                       }
                }
 
                rxd += 4;
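For the ciphers that reach the IV copy in the new switch, the six IV/PN bytes sit in the descriptor in reverse order, which is why the assignments above run data[5]..data[0] into iv[0]..iv[5]. A trivial standalone equivalent:

#include <stdio.h>

int main(void)
{
	/* descriptor bytes as received: IV/PN stored low byte first */
	unsigned char data[6] = { 0x66, 0x55, 0x44, 0x33, 0x22, 0x11 };
	unsigned char iv[6];

	for (int i = 0; i < 6; i++)
		iv[i] = data[5 - i];	/* equivalent to the unrolled copy */

	for (int i = 0; i < 6; i++)
		printf("%02x", iv[i]);	/* prints 112233445566 */
	printf("\n");
	return 0;
}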
@@ -831,7 +844,7 @@ void mt7603_wtbl_set_rates(struct mt7603_dev *dev, struct mt7603_sta *sta,
        sta->wcid.tx_info |= MT_WCID_TX_INFO_SET;
 }
 
-static enum mt7603_cipher_type
+static enum mt76_cipher_type
 mt7603_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data)
 {
        memset(key_data, 0, 32);
@@ -863,7 +876,7 @@ mt7603_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data)
 int mt7603_wtbl_set_key(struct mt7603_dev *dev, int wcid,
                        struct ieee80211_key_conf *key)
 {
-       enum mt7603_cipher_type cipher;
+       enum mt76_cipher_type cipher;
        u32 addr = mt7603_wtbl3_addr(wcid);
        u8 key_data[32];
        int key_len = sizeof(key_data);
@@ -1213,7 +1226,7 @@ mt7603_mac_add_txs_skb(struct mt7603_dev *dev, struct mt7603_sta *sta, int pid,
                struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 
                if (!mt7603_fill_txs(dev, sta, info, txs_data)) {
-                       ieee80211_tx_info_clear_status(info);
+                       info->status.rates[0].count = 0;
                        info->status.rates[0].idx = -1;
                }
 
@@ -1584,12 +1597,12 @@ trigger:
        return true;
 }
 
-void mt7603_update_channel(struct mt76_dev *mdev)
+void mt7603_update_channel(struct mt76_phy *mphy)
 {
-       struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
+       struct mt7603_dev *dev = container_of(mphy->dev, struct mt7603_dev, mt76);
        struct mt76_channel_state *state;
 
-       state = mdev->phy.chan_state;
+       state = mphy->chan_state;
        state->cc_busy += mt76_rr(dev, MT_MIB_STAT_CCA);
 }
 
@@ -1806,7 +1819,7 @@ void mt7603_mac_work(struct work_struct *work)
        mutex_lock(&dev->mt76.mutex);
 
        dev->mphy.mac_work_count++;
-       mt76_update_survey(&dev->mt76);
+       mt76_update_survey(&dev->mphy);
        mt7603_edcca_check(dev);
 
        for (i = 0, idx = 0; i < 2; i++) {
index 1df5b9f..0fd46d9 100644 (file)
@@ -256,7 +256,7 @@ void mt7603_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
 
 void mt7603_pre_tbtt_tasklet(struct tasklet_struct *t);
 
-void mt7603_update_channel(struct mt76_dev *mdev);
+void mt7603_update_channel(struct mt76_phy *mphy);
 
 void mt7603_edcca_set_strict(struct mt7603_dev *dev, bool val);
 void mt7603_cca_stats_reset(struct mt7603_dev *dev);
index 6741e69..3b90109 100644 (file)
@@ -765,16 +765,4 @@ enum {
 #define MT_WTBL1_OR                    (MT_WTBL1_BASE + 0x2300)
 #define MT_WTBL1_OR_PSM_WRITE          BIT(31)
 
-enum mt7603_cipher_type {
-       MT_CIPHER_NONE,
-       MT_CIPHER_WEP40,
-       MT_CIPHER_TKIP,
-       MT_CIPHER_TKIP_NO_MIC,
-       MT_CIPHER_AES_CCMP,
-       MT_CIPHER_WEP104,
-       MT_CIPHER_BIP_CMAC_128,
-       MT_CIPHER_WEP128,
-       MT_CIPHER_WAPI,
-};
-
 #endif
index e8fc4a7..83f9861 100644 (file)
@@ -1,4 +1,4 @@
-#SPDX-License-Identifier: ISC
+# SPDX-License-Identifier: ISC
 
 obj-$(CONFIG_MT7615_COMMON) += mt7615-common.o
 obj-$(CONFIG_MT7615E) += mt7615e.o
index 676bb22..cb46597 100644 (file)
@@ -75,7 +75,7 @@ mt7615_pm_set(void *data, u64 val)
        if (!mt7615_wait_for_mcu_init(dev))
                return 0;
 
-       if (!mt7615_firmware_offload(dev) || !mt76_is_mmio(&dev->mt76))
+       if (!mt7615_firmware_offload(dev) || mt76_is_usb(&dev->mt76))
                return -EOPNOTSUPP;
 
        if (val == pm->enable)
@@ -319,24 +319,6 @@ mt7615_radio_read(struct seq_file *s, void *data)
        return 0;
 }
 
-static int mt7615_read_temperature(struct seq_file *s, void *data)
-{
-       struct mt7615_dev *dev = dev_get_drvdata(s->private);
-       int temp;
-
-       if (!mt7615_wait_for_mcu_init(dev))
-               return 0;
-
-       /* cpu */
-       mt7615_mutex_acquire(dev);
-       temp = mt7615_mcu_get_temperature(dev, 0);
-       mt7615_mutex_release(dev);
-
-       seq_printf(s, "Temperature: %d\n", temp);
-
-       return 0;
-}
-
 static int
 mt7615_queues_acq(struct seq_file *s, void *data)
 {
@@ -566,8 +548,6 @@ int mt7615_init_debugfs(struct mt7615_dev *dev)
 
        debugfs_create_file("reset_test", 0200, dir, dev,
                            &fops_reset_test);
-       debugfs_create_devm_seqfile(dev->mt76.dev, "temperature", dir,
-                                   mt7615_read_temperature);
        debugfs_create_file("ext_mac_addr", 0600, dir, dev, &fops_ext_mac_addr);
 
        debugfs_create_u32("rf_wfidx", 0600, dir, &dev->debugfs_rf_wf);
index 8004ae5..00aefea 100644 (file)
@@ -81,7 +81,7 @@ static int mt7615_poll_tx(struct napi_struct *napi, int budget)
        if (napi_complete(napi))
                mt7615_irq_enable(dev, mt7615_tx_mcu_int_mask(dev));
 
-       mt76_connac_pm_unref(&dev->pm);
+       mt76_connac_pm_unref(&dev->mphy, &dev->pm);
 
        return 0;
 }
@@ -99,7 +99,7 @@ static int mt7615_poll_rx(struct napi_struct *napi, int budget)
                return 0;
        }
        done = mt76_dma_rx_poll(napi, budget);
-       mt76_connac_pm_unref(&dev->pm);
+       mt76_connac_pm_unref(&dev->mphy, &dev->pm);
 
        return done;
 }
@@ -222,14 +222,9 @@ void mt7615_dma_start(struct mt7615_dev *dev)
 int mt7615_dma_init(struct mt7615_dev *dev)
 {
        int rx_ring_size = MT7615_RX_RING_SIZE;
-       int rx_buf_size = MT_RX_BUF_SIZE;
        u32 mask;
        int ret;
 
-       /* Increase buffer size to receive large VHT MPDUs */
-       if (dev->mphy.cap.has_5ghz)
-               rx_buf_size *= 2;
-
        mt76_dma_attach(&dev->mt76);
 
        mt76_wr(dev, MT_WPDMA_GLO_CFG,
@@ -270,7 +265,7 @@ int mt7615_dma_init(struct mt7615_dev *dev)
 
        /* init rx queues */
        ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU], 1,
-                              MT7615_RX_MCU_RING_SIZE, rx_buf_size,
+                              MT7615_RX_MCU_RING_SIZE, MT_RX_BUF_SIZE,
                               MT_RX_RING_BASE);
        if (ret)
                return ret;
@@ -279,7 +274,7 @@ int mt7615_dma_init(struct mt7615_dev *dev)
            rx_ring_size /= 2;
 
        ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN], 0,
-                              rx_ring_size, rx_buf_size, MT_RX_RING_BASE);
+                              rx_ring_size, MT_RX_BUF_SIZE, MT_RX_RING_BASE);
        if (ret)
                return ret;
 
index d20f05a..2f1ac64 100644 (file)
@@ -8,11 +8,61 @@
  */
 
 #include <linux/etherdevice.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
 #include "mt7615.h"
 #include "mac.h"
 #include "mcu.h"
 #include "eeprom.h"
 
+static ssize_t mt7615_thermal_show_temp(struct device *dev,
+                                       struct device_attribute *attr,
+                                       char *buf)
+{
+       struct mt7615_dev *mdev = dev_get_drvdata(dev);
+       int temperature;
+
+       if (!mt7615_wait_for_mcu_init(mdev))
+               return 0;
+
+       mt7615_mutex_acquire(mdev);
+       temperature = mt7615_mcu_get_temperature(mdev);
+       mt7615_mutex_release(mdev);
+
+       if (temperature < 0)
+               return temperature;
+
+       /* display in millidegree Celsius */
+       return sprintf(buf, "%u\n", temperature * 1000);
+}
+
+static SENSOR_DEVICE_ATTR(temp1_input, 0444, mt7615_thermal_show_temp,
+                         NULL, 0);
+
+static struct attribute *mt7615_hwmon_attrs[] = {
+       &sensor_dev_attr_temp1_input.dev_attr.attr,
+       NULL,
+};
+ATTRIBUTE_GROUPS(mt7615_hwmon);
+
+int mt7615_thermal_init(struct mt7615_dev *dev)
+{
+       struct wiphy *wiphy = mt76_hw(dev)->wiphy;
+       struct device *hwmon;
+
+       if (!IS_REACHABLE(CONFIG_HWMON))
+               return 0;
+
+       hwmon = devm_hwmon_device_register_with_groups(&wiphy->dev,
+                                                      wiphy_name(wiphy), dev,
+                                                      mt7615_hwmon_groups);
+       if (IS_ERR(hwmon))
+               return PTR_ERR(hwmon);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(mt7615_thermal_init);
+
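One note on the guard above: IS_REACHABLE(CONFIG_HWMON) keeps the driver loadable when hwmon support exists but cannot be called from here (for example hwmon built as a module while this code is built in); mt7615_thermal_init() then returns 0 and simply skips sensor registration rather than failing device init.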
 static void
 mt7615_phy_init(struct mt7615_dev *dev)
 {
@@ -174,35 +224,6 @@ bool mt7615_wait_for_mcu_init(struct mt7615_dev *dev)
 }
 EXPORT_SYMBOL_GPL(mt7615_wait_for_mcu_init);
 
-#define CCK_RATE(_idx, _rate) {                                                \
-       .bitrate = _rate,                                               \
-       .flags = IEEE80211_RATE_SHORT_PREAMBLE,                         \
-       .hw_value = (MT_PHY_TYPE_CCK << 8) | (_idx),                    \
-       .hw_value_short = (MT_PHY_TYPE_CCK << 8) | (4 + (_idx)),        \
-}
-
-#define OFDM_RATE(_idx, _rate) {                                       \
-       .bitrate = _rate,                                               \
-       .hw_value = (MT_PHY_TYPE_OFDM << 8) | (_idx),                   \
-       .hw_value_short = (MT_PHY_TYPE_OFDM << 8) | (_idx),             \
-}
-
-struct ieee80211_rate mt7615_rates[] = {
-       CCK_RATE(0, 10),
-       CCK_RATE(1, 20),
-       CCK_RATE(2, 55),
-       CCK_RATE(3, 110),
-       OFDM_RATE(11, 60),
-       OFDM_RATE(15, 90),
-       OFDM_RATE(10, 120),
-       OFDM_RATE(14, 180),
-       OFDM_RATE(9,  240),
-       OFDM_RATE(13, 360),
-       OFDM_RATE(8,  480),
-       OFDM_RATE(12, 540),
-};
-EXPORT_SYMBOL_GPL(mt7615_rates);
-
 static const struct ieee80211_iface_limit if_limits[] = {
        {
                .max = 1,
@@ -362,7 +383,7 @@ mt7615_init_wiphy(struct ieee80211_hw *hw)
        wiphy->reg_notifier = mt7615_regd_notifier;
 
        wiphy->max_sched_scan_plan_interval =
-               MT76_CONNAC_MAX_SCHED_SCAN_INTERVAL;
+               MT76_CONNAC_MAX_TIME_SCHED_SCAN_INTERVAL;
        wiphy->max_sched_scan_ie_len = IEEE80211_MAX_DATA_LEN;
        wiphy->max_scan_ie_len = MT76_CONNAC_SCAN_IE_LEN;
        wiphy->max_sched_scan_ssids = MT76_CONNAC_MAX_SCHED_SCAN_SSID;
@@ -472,8 +493,8 @@ int mt7615_register_ext_phy(struct mt7615_dev *dev)
        for (i = 0; i <= MT_TXQ_PSD ; i++)
                mphy->q_tx[i] = dev->mphy.q_tx[i];
 
-       ret = mt76_register_phy(mphy, true, mt7615_rates,
-                               ARRAY_SIZE(mt7615_rates));
+       ret = mt76_register_phy(mphy, true, mt76_rates,
+                               ARRAY_SIZE(mt76_rates));
        if (ret)
                ieee80211_free_hw(mphy->hw);
 
index e2dcfee..ff3f85e 100644 (file)
@@ -20,7 +20,7 @@
 #define to_rssi(field, rxv)            ((FIELD_GET(field, rxv) - 220) / 2)
 
 static const struct mt7615_dfs_radar_spec etsi_radar_specs = {
-       .pulse_th = { 40, -10, -80, 800, 3360, 128, 5200 },
+       .pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
        .radar_pattern = {
                [5] =  { 1, 0,  6, 32, 28, 0, 17,  990, 5010, 1, 1 },
                [6] =  { 1, 0,  9, 32, 28, 0, 27,  615, 5010, 1, 1 },
@@ -34,7 +34,7 @@ static const struct mt7615_dfs_radar_spec etsi_radar_specs = {
 };
 
 static const struct mt7615_dfs_radar_spec fcc_radar_specs = {
-       .pulse_th = { 40, -10, -80, 800, 3360, 128, 5200 },
+       .pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
        .radar_pattern = {
                [0] = { 1, 0,  9,  32, 28, 0, 13, 508, 3076, 1,  1 },
                [1] = { 1, 0, 12,  32, 28, 0, 17, 140,  240, 1,  1 },
@@ -45,7 +45,7 @@ static const struct mt7615_dfs_radar_spec fcc_radar_specs = {
 };
 
 static const struct mt7615_dfs_radar_spec jp_radar_specs = {
-       .pulse_th = { 40, -10, -80, 800, 3360, 128, 5200 },
+       .pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
        .radar_pattern = {
                [0] =  { 1, 0,  8, 32, 28, 0, 13,  508, 3076, 1,  1 },
                [1] =  { 1, 0, 12, 32, 28, 0, 17,  140,  240, 1,  1 },
@@ -57,6 +57,33 @@ static const struct mt7615_dfs_radar_spec jp_radar_specs = {
        },
 };
 
+static enum mt76_cipher_type
+mt7615_mac_get_cipher(int cipher)
+{
+       switch (cipher) {
+       case WLAN_CIPHER_SUITE_WEP40:
+               return MT_CIPHER_WEP40;
+       case WLAN_CIPHER_SUITE_WEP104:
+               return MT_CIPHER_WEP104;
+       case WLAN_CIPHER_SUITE_TKIP:
+               return MT_CIPHER_TKIP;
+       case WLAN_CIPHER_SUITE_AES_CMAC:
+               return MT_CIPHER_BIP_CMAC_128;
+       case WLAN_CIPHER_SUITE_CCMP:
+               return MT_CIPHER_AES_CCMP;
+       case WLAN_CIPHER_SUITE_CCMP_256:
+               return MT_CIPHER_CCMP_256;
+       case WLAN_CIPHER_SUITE_GCMP:
+               return MT_CIPHER_GCMP;
+       case WLAN_CIPHER_SUITE_GCMP_256:
+               return MT_CIPHER_GCMP_256;
+       case WLAN_CIPHER_SUITE_SMS4:
+               return MT_CIPHER_WAPI;
+       default:
+               return MT_CIPHER_NONE;
+       }
+}
+
 static struct mt76_wcid *mt7615_rx_get_wcid(struct mt7615_dev *dev,
                                            u8 idx, bool unicast)
 {
@@ -313,14 +340,27 @@ static int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
                u8 *data = (u8 *)rxd;
 
                if (status->flag & RX_FLAG_DECRYPTED) {
-                       status->iv[0] = data[5];
-                       status->iv[1] = data[4];
-                       status->iv[2] = data[3];
-                       status->iv[3] = data[2];
-                       status->iv[4] = data[1];
-                       status->iv[5] = data[0];
-
-                       insert_ccmp_hdr = FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
+                       switch (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2)) {
+                       case MT_CIPHER_AES_CCMP:
+                       case MT_CIPHER_CCMP_CCX:
+                       case MT_CIPHER_CCMP_256:
+                               insert_ccmp_hdr =
+                                       FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
+                               fallthrough;
+                       case MT_CIPHER_TKIP:
+                       case MT_CIPHER_TKIP_NO_MIC:
+                       case MT_CIPHER_GCMP:
+                       case MT_CIPHER_GCMP_256:
+                               status->iv[0] = data[5];
+                               status->iv[1] = data[4];
+                               status->iv[2] = data[3];
+                               status->iv[3] = data[2];
+                               status->iv[4] = data[1];
+                               status->iv[5] = data[0];
+                               break;
+                       default:
+                               break;
+                       }
                }
                rxd += 4;
                if ((u8 *)rxd - skb->data >= skb->len)
@@ -1062,7 +1102,7 @@ void mt7615_mac_set_rates(struct mt7615_phy *phy, struct mt7615_sta *sta,
        idx = idx > HW_BSSID_MAX ? HW_BSSID_0 : idx;
        addr = idx > 1 ? MT_LPON_TCR2(idx): MT_LPON_TCR0(idx);
 
-       mt76_set(dev, addr, MT_LPON_TCR_MODE); /* TSF read */
+       mt76_rmw(dev, addr, MT_LPON_TCR_MODE, MT_LPON_TCR_READ); /* TSF read */
        sta->rate_set_tsf = mt76_rr(dev, MT_LPON_UTTR0) & ~BIT(0);
        sta->rate_set_tsf |= rd.rateset;
 
@@ -1078,7 +1118,7 @@ EXPORT_SYMBOL_GPL(mt7615_mac_set_rates);
 static int
 mt7615_mac_wtbl_update_key(struct mt7615_dev *dev, struct mt76_wcid *wcid,
                           struct ieee80211_key_conf *key,
-                          enum mt7615_cipher_type cipher, u16 cipher_mask,
+                          enum mt76_cipher_type cipher, u16 cipher_mask,
                           enum set_key_cmd cmd)
 {
        u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx) + 30 * 4;
@@ -1118,7 +1158,7 @@ mt7615_mac_wtbl_update_key(struct mt7615_dev *dev, struct mt76_wcid *wcid,
 
 static int
 mt7615_mac_wtbl_update_pk(struct mt7615_dev *dev, struct mt76_wcid *wcid,
-                         enum mt7615_cipher_type cipher, u16 cipher_mask,
+                         enum mt76_cipher_type cipher, u16 cipher_mask,
                          int keyidx, enum set_key_cmd cmd)
 {
        u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx), w0, w1;
@@ -1157,7 +1197,7 @@ mt7615_mac_wtbl_update_pk(struct mt7615_dev *dev, struct mt76_wcid *wcid,
 
 static void
 mt7615_mac_wtbl_update_cipher(struct mt7615_dev *dev, struct mt76_wcid *wcid,
-                             enum mt7615_cipher_type cipher, u16 cipher_mask,
+                             enum mt76_cipher_type cipher, u16 cipher_mask,
                              enum set_key_cmd cmd)
 {
        u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx);
@@ -1183,7 +1223,7 @@ int __mt7615_mac_wtbl_set_key(struct mt7615_dev *dev,
                              struct ieee80211_key_conf *key,
                              enum set_key_cmd cmd)
 {
-       enum mt7615_cipher_type cipher;
+       enum mt76_cipher_type cipher;
        u16 cipher_mask = wcid->cipher;
        int err;
 
@@ -1235,22 +1275,20 @@ static bool mt7615_fill_txs(struct mt7615_dev *dev, struct mt7615_sta *sta,
        int first_idx = 0, last_idx;
        int i, idx, count;
        bool fixed_rate, ack_timeout;
-       bool probe, ampdu, cck = false;
+       bool ampdu, cck = false;
        bool rs_idx;
        u32 rate_set_tsf;
        u32 final_rate, final_rate_flags, final_nss, txs;
 
-       fixed_rate = info->status.rates[0].count;
-       probe = !!(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
-
        txs = le32_to_cpu(txs_data[1]);
-       ampdu = !fixed_rate && (txs & MT_TXS1_AMPDU);
+       ampdu = txs & MT_TXS1_AMPDU;
 
        txs = le32_to_cpu(txs_data[3]);
        count = FIELD_GET(MT_TXS3_TX_COUNT, txs);
        last_idx = FIELD_GET(MT_TXS3_LAST_TX_RATE, txs);
 
        txs = le32_to_cpu(txs_data[0]);
+       fixed_rate = txs & MT_TXS0_FIXED_RATE;
        final_rate = FIELD_GET(MT_TXS0_TX_RATE, txs);
        ack_timeout = txs & MT_TXS0_ACK_TIMEOUT;
 
@@ -1272,7 +1310,7 @@ static bool mt7615_fill_txs(struct mt7615_dev *dev, struct mt7615_sta *sta,
 
        first_idx = max_t(int, 0, last_idx - (count - 1) / MT7615_RATE_RETRY);
 
-       if (fixed_rate && !probe) {
+       if (fixed_rate) {
                info->status.rates[0].count = count;
                i = 0;
                goto out;
@@ -1391,7 +1429,7 @@ static bool mt7615_mac_add_txs_skb(struct mt7615_dev *dev,
                struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 
                if (!mt7615_fill_txs(dev, sta, info, txs_data)) {
-                       ieee80211_tx_info_clear_status(info);
+                       info->status.rates[0].count = 0;
                        info->status.rates[0].idx = -1;
                }
 
@@ -1821,43 +1859,41 @@ mt7615_phy_update_channel(struct mt76_phy *mphy, int idx)
        state->noise = -(phy->noise >> 4);
 }
 
-static void __mt7615_update_channel(struct mt7615_dev *dev)
+static void mt7615_update_survey(struct mt7615_dev *dev)
 {
        struct mt76_dev *mdev = &dev->mt76;
+       ktime_t cur_time;
+
+       /* MT7615 can only update both phys simultaneously
+        * since some registers are shared across bands.
+        */
 
        mt7615_phy_update_channel(&mdev->phy, 0);
        if (mdev->phy2)
                mt7615_phy_update_channel(mdev->phy2, 1);
 
+       cur_time = ktime_get_boottime();
+
+       mt76_update_survey_active_time(&mdev->phy, cur_time);
+       if (mdev->phy2)
+               mt76_update_survey_active_time(mdev->phy2, cur_time);
+
        /* reset obss airtime */
        mt76_set(dev, MT_WF_RMAC_MIB_TIME0, MT_WF_RMAC_MIB_RXTIME_CLR);
 }
 
-void mt7615_update_channel(struct mt76_dev *mdev)
+void mt7615_update_channel(struct mt76_phy *mphy)
 {
-       struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
+       struct mt7615_dev *dev = container_of(mphy->dev, struct mt7615_dev, mt76);
 
        if (mt76_connac_pm_wake(&dev->mphy, &dev->pm))
                return;
 
-       __mt7615_update_channel(dev);
+       mt7615_update_survey(dev);
        mt76_connac_power_save_sched(&dev->mphy, &dev->pm);
 }
 EXPORT_SYMBOL_GPL(mt7615_update_channel);
 
-static void mt7615_update_survey(struct mt7615_dev *dev)
-{
-       struct mt76_dev *mdev = &dev->mt76;
-       ktime_t cur_time;
-
-       __mt7615_update_channel(dev);
-       cur_time = ktime_get_boottime();
-
-       mt76_update_survey_active_time(&mdev->phy, cur_time);
-       if (mdev->phy2)
-               mt76_update_survey_active_time(mdev->phy2, cur_time);
-}
-
 static void
 mt7615_mac_update_mib_stats(struct mt7615_phy *phy)
 {
@@ -1906,15 +1942,26 @@ void mt7615_pm_wake_work(struct work_struct *work)
        mphy = dev->phy.mt76;
 
        if (!mt7615_mcu_set_drv_ctrl(dev)) {
+               struct mt76_dev *mdev = &dev->mt76;
                int i;
 
-               mt76_for_each_q_rx(&dev->mt76, i)
-                       napi_schedule(&dev->mt76.napi[i]);
-               mt76_connac_pm_dequeue_skbs(mphy, &dev->pm);
-               mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], false);
-               if (test_bit(MT76_STATE_RUNNING, &mphy->state))
+               if (mt76_is_sdio(mdev)) {
+                       mt76_worker_schedule(&mdev->sdio.txrx_worker);
+               } else {
+                       mt76_for_each_q_rx(mdev, i)
+                               napi_schedule(&mdev->napi[i]);
+                       mt76_connac_pm_dequeue_skbs(mphy, &dev->pm);
+                       mt76_queue_tx_cleanup(dev, mdev->q_mcu[MT_MCUQ_WM],
+                                             false);
+               }
+
+               if (test_bit(MT76_STATE_RUNNING, &mphy->state)) {
+                       unsigned long timeout;
+
+                       timeout = mt7615_get_macwork_timeout(dev);
                        ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work,
-                                                    MT7615_WATCHDOG_TIME);
+                                                    timeout);
+               }
        }
 
        ieee80211_wake_queues(mphy->hw);
@@ -1949,6 +1996,7 @@ void mt7615_mac_work(struct work_struct *work)
 {
        struct mt7615_phy *phy;
        struct mt76_phy *mphy;
+       unsigned long timeout;
 
        mphy = (struct mt76_phy *)container_of(work, struct mt76_phy,
                                               mac_work.work);
@@ -1967,8 +2015,9 @@ void mt7615_mac_work(struct work_struct *work)
        mt7615_mutex_release(phy->dev);
 
        mt76_tx_status_check(mphy->dev, NULL, false);
-       ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work,
-                                    MT7615_WATCHDOG_TIME);
+
+       timeout = mt7615_get_macwork_timeout(phy->dev);
+       ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work, timeout);
 }
 
 void mt7615_tx_token_put(struct mt7615_dev *dev)
@@ -2049,14 +2098,12 @@ mt7615_dfs_init_radar_specs(struct mt7615_phy *phy)
 {
        const struct mt7615_dfs_radar_spec *radar_specs;
        struct mt7615_dev *dev = phy->dev;
-       int err, i;
+       int err, i, lpn = 500;
 
        switch (dev->mt76.region) {
        case NL80211_DFS_FCC:
                radar_specs = &fcc_radar_specs;
-               err = mt7615_mcu_set_fcc5_lpn(dev, 8);
-               if (err < 0)
-                       return err;
+               lpn = 8;
                break;
        case NL80211_DFS_ETSI:
                radar_specs = &etsi_radar_specs;
@@ -2068,6 +2115,11 @@ mt7615_dfs_init_radar_specs(struct mt7615_phy *phy)
                return -EINVAL;
        }
 
+       /* avoid FCC radar detection in non-FCC region */
+       err = mt7615_mcu_set_fcc5_lpn(dev, lpn);
+       if (err < 0)
+               return err;
+
        for (i = 0; i < ARRAY_SIZE(radar_specs->radar_pattern); i++) {
                err = mt7615_mcu_set_radar_th(dev, i,
                                              &radar_specs->radar_pattern[i]);
index 6bf9da0..46f283e 100644 (file)
@@ -383,48 +383,6 @@ struct mt7615_dfs_radar_spec {
        struct mt7615_dfs_pattern radar_pattern[16];
 };
 
-enum mt7615_cipher_type {
-       MT_CIPHER_NONE,
-       MT_CIPHER_WEP40,
-       MT_CIPHER_TKIP,
-       MT_CIPHER_TKIP_NO_MIC,
-       MT_CIPHER_AES_CCMP,
-       MT_CIPHER_WEP104,
-       MT_CIPHER_BIP_CMAC_128,
-       MT_CIPHER_WEP128,
-       MT_CIPHER_WAPI,
-       MT_CIPHER_CCMP_256 = 10,
-       MT_CIPHER_GCMP,
-       MT_CIPHER_GCMP_256,
-};
-
-static inline enum mt7615_cipher_type
-mt7615_mac_get_cipher(int cipher)
-{
-       switch (cipher) {
-       case WLAN_CIPHER_SUITE_WEP40:
-               return MT_CIPHER_WEP40;
-       case WLAN_CIPHER_SUITE_WEP104:
-               return MT_CIPHER_WEP104;
-       case WLAN_CIPHER_SUITE_TKIP:
-               return MT_CIPHER_TKIP;
-       case WLAN_CIPHER_SUITE_AES_CMAC:
-               return MT_CIPHER_BIP_CMAC_128;
-       case WLAN_CIPHER_SUITE_CCMP:
-               return MT_CIPHER_AES_CCMP;
-       case WLAN_CIPHER_SUITE_CCMP_256:
-               return MT_CIPHER_CCMP_256;
-       case WLAN_CIPHER_SUITE_GCMP:
-               return MT_CIPHER_GCMP;
-       case WLAN_CIPHER_SUITE_GCMP_256:
-               return MT_CIPHER_GCMP_256;
-       case WLAN_CIPHER_SUITE_SMS4:
-               return MT_CIPHER_WAPI;
-       default:
-               return MT_CIPHER_NONE;
-       }
-}
-
 static inline struct mt7615_txp_common *
 mt7615_txwi_to_txp(struct mt76_dev *dev, struct mt76_txwi_cache *t)
 {
index 39733b3..dada43d 100644 (file)
@@ -28,6 +28,7 @@ static int mt7615_start(struct ieee80211_hw *hw)
 {
        struct mt7615_dev *dev = mt7615_hw_dev(hw);
        struct mt7615_phy *phy = mt7615_hw_phy(hw);
+       unsigned long timeout;
        bool running;
        int ret;
 
@@ -78,8 +79,8 @@ static int mt7615_start(struct ieee80211_hw *hw)
 
        set_bit(MT76_STATE_RUNNING, &phy->mt76->state);
 
-       ieee80211_queue_delayed_work(hw, &phy->mt76->mac_work,
-                                    MT7615_WATCHDOG_TIME);
+       timeout = mt7615_get_macwork_timeout(dev);
+       ieee80211_queue_delayed_work(hw, &phy->mt76->mac_work, timeout);
 
        if (!running)
                mt7615_mac_reset_counters(dev);
@@ -240,8 +241,6 @@ static int mt7615_add_interface(struct ieee80211_hw *hw,
        }
 
        ret = mt7615_mcu_add_dev_info(phy, vif, true);
-       if (ret)
-               goto out;
 out:
        mt7615_mutex_release(dev);
 
@@ -352,10 +351,12 @@ out:
        mt7615_mutex_release(dev);
 
        mt76_worker_schedule(&dev->mt76.tx_worker);
-       if (!mt76_testmode_enabled(phy->mt76))
+       if (!mt76_testmode_enabled(phy->mt76)) {
+               unsigned long timeout = mt7615_get_macwork_timeout(dev);
+
                ieee80211_queue_delayed_work(phy->mt76->hw,
-                                            &phy->mt76->mac_work,
-                                            MT7615_WATCHDOG_TIME);
+                                            &phy->mt76->mac_work, timeout);
+       }
 
        return ret;
 }
@@ -695,7 +696,7 @@ static void mt7615_sta_rate_tbl_update(struct ieee80211_hw *hw,
        msta->n_rates = i;
        if (mt76_connac_pm_ref(phy->mt76, &dev->pm)) {
                mt7615_mac_set_rates(phy, msta, NULL, msta->rates);
-               mt76_connac_pm_unref(&dev->pm);
+               mt76_connac_pm_unref(phy->mt76, &dev->pm);
        }
        spin_unlock_bh(&dev->mt76.lock);
 }
@@ -711,7 +712,7 @@ void mt7615_tx_worker(struct mt76_worker *w)
        }
 
        mt76_tx_worker_run(&dev->mt76);
-       mt76_connac_pm_unref(&dev->pm);
+       mt76_connac_pm_unref(&dev->mphy, &dev->pm);
 }
 
 static void mt7615_tx(struct ieee80211_hw *hw,
@@ -741,7 +742,7 @@ static void mt7615_tx(struct ieee80211_hw *hw,
 
        if (mt76_connac_pm_ref(mphy, &dev->pm)) {
                mt76_tx(mphy, control->sta, wcid, skb);
-               mt76_connac_pm_unref(&dev->pm);
+               mt76_connac_pm_unref(mphy, &dev->pm);
                return;
        }
 
@@ -881,7 +882,8 @@ mt7615_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
 
        mt7615_mutex_acquire(dev);
 
-       mt76_set(dev, reg, MT_LPON_TCR_MODE); /* TSF read */
+       /* TSF read */
+       mt76_rmw(dev, reg, MT_LPON_TCR_MODE, MT_LPON_TCR_READ);
        tsf.t32[0] = mt76_rr(dev, MT_LPON_UTTR0);
        tsf.t32[1] = mt76_rr(dev, MT_LPON_UTTR1);
 
@@ -911,7 +913,33 @@ mt7615_set_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
        mt76_wr(dev, MT_LPON_UTTR0, tsf.t32[0]);
        mt76_wr(dev, MT_LPON_UTTR1, tsf.t32[1]);
        /* TSF software overwrite */
-       mt76_set(dev, reg, MT_LPON_TCR_WRITE);
+       mt76_rmw(dev, reg, MT_LPON_TCR_MODE, MT_LPON_TCR_WRITE);
+
+       mt7615_mutex_release(dev);
+}
+
+static void
+mt7615_offset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+                 s64 timestamp)
+{
+       struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+       struct mt7615_dev *dev = mt7615_hw_dev(hw);
+       union {
+               u64 t64;
+               u32 t32[2];
+       } tsf = { .t64 = timestamp, };
+       u16 idx = mvif->mt76.omac_idx;
+       u32 reg;
+
+       idx = idx > HW_BSSID_MAX ? HW_BSSID_0 : idx;
+       reg = idx > 1 ? MT_LPON_TCR2(idx): MT_LPON_TCR0(idx);
+
+       mt7615_mutex_acquire(dev);
+
+       mt76_wr(dev, MT_LPON_UTTR0, tsf.t32[0]);
+       mt76_wr(dev, MT_LPON_UTTR1, tsf.t32[1]);
+       /* TSF software adjust */
+       mt76_rmw(dev, reg, MT_LPON_TCR_MODE, MT_LPON_TCR_ADJUST);
 
        mt7615_mutex_release(dev);
 }
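MT_LPON_TCR_MODE is treated here as a multi-bit mode field, so the old mt76_set() (a plain OR) could leave stale mode bits behind; mt76_rmw() clears the field before writing the READ, WRITE, or ADJUST value. The arithmetic in a standalone sketch, with illustrative field values:

#include <stdio.h>

#define TCR_MODE   0x3u	/* illustrative 2-bit mode field */
#define TCR_WRITE  0x1u
#define TCR_ADJUST 0x2u

static unsigned int rmw(unsigned int reg, unsigned int mask, unsigned int val)
{
	return (reg & ~mask) | val;	/* clear the field, then set it */
}

int main(void)
{
	unsigned int reg = TCR_WRITE;	/* stale mode left from a prior op */

	/* a set-only OR would yield 0x3 (corrupt); rmw yields a clean 0x2 */
	printf("set: 0x%x  rmw: 0x%x\n", reg | TCR_ADJUST,
	       rmw(reg, TCR_MODE, TCR_ADJUST));
	return 0;
}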
@@ -1162,7 +1190,7 @@ static void mt7615_sta_set_decap_offload(struct ieee80211_hw *hw,
        else
                clear_bit(MT_WCID_FLAG_HDR_TRANS, &msta->wcid.flags);
 
-       mt7615_mcu_sta_update_hdr_trans(dev, vif, sta);
+       mt7615_mcu_set_sta_decap_offload(dev, vif, sta);
 }
 
 #ifdef CONFIG_PM
@@ -1200,6 +1228,7 @@ static int mt7615_resume(struct ieee80211_hw *hw)
 {
        struct mt7615_phy *phy = mt7615_hw_phy(hw);
        struct mt7615_dev *dev = mt7615_hw_dev(hw);
+       unsigned long timeout;
        bool running;
 
        mt7615_mutex_acquire(dev);
@@ -1223,8 +1252,8 @@ static int mt7615_resume(struct ieee80211_hw *hw)
                                            mt76_connac_mcu_set_suspend_iter,
                                            phy->mt76);
 
-       ieee80211_queue_delayed_work(hw, &phy->mt76->mac_work,
-                                    MT7615_WATCHDOG_TIME);
+       timeout = mt7615_get_macwork_timeout(dev);
+       ieee80211_queue_delayed_work(hw, &phy->mt76->mac_work, timeout);
 
        mt7615_mutex_release(dev);
 
@@ -1278,6 +1307,7 @@ const struct ieee80211_ops mt7615_ops = {
        .get_stats = mt7615_get_stats,
        .get_tsf = mt7615_get_tsf,
        .set_tsf = mt7615_set_tsf,
+       .offset_tsf = mt7615_offset_tsf,
        .get_survey = mt76_get_survey,
        .get_antenna = mt76_get_antenna,
        .set_antenna = mt7615_set_antenna,
index aa42af9..f8a0969 100644 (file)
@@ -411,6 +411,9 @@ mt7615_mcu_rx_csa_notify(struct mt7615_dev *dev, struct sk_buff *skb)
 
        c = (struct mt7615_mcu_csa_notify *)skb->data;
 
+       if (c->omac_idx > EXT_BSSID_MAX)
+               return;
+
        if (ext_phy && ext_phy->omac_mask & BIT_ULL(c->omac_idx))
                mphy = dev->mt76.phy2;
 
@@ -427,6 +430,10 @@ mt7615_mcu_rx_radar_detected(struct mt7615_dev *dev, struct sk_buff *skb)
 
        r = (struct mt7615_mcu_rdd_report *)skb->data;
 
+       if (!dev->radar_pattern.n_pulses && !r->long_detected &&
+           !r->constant_prf_detected && !r->staggered_prf_detected)
+               return;
+
        if (r->band_idx && dev->mt76.phy2)
                mphy = dev->mt76.phy2;
 
@@ -1021,9 +1028,10 @@ mt7615_mcu_wtbl_sta_add(struct mt7615_phy *phy, struct ieee80211_vif *vif,
        if (IS_ERR(sskb))
                return PTR_ERR(sskb);
 
-       mt76_connac_mcu_sta_basic_tlv(sskb, vif, sta, enable);
+       mt76_connac_mcu_sta_basic_tlv(sskb, vif, sta, enable, true);
        if (enable && sta)
-               mt76_connac_mcu_sta_tlv(phy->mt76, sskb, sta, vif, 0);
+               mt76_connac_mcu_sta_tlv(phy->mt76, sskb, sta, vif, 0,
+                                       MT76_STA_INFO_STATE_ASSOC);
 
        wtbl_hdr = mt76_connac_mcu_alloc_wtbl_req(&dev->mt76, &msta->wcid,
                                                  WTBL_RESET_AND_SET, NULL,
@@ -1037,8 +1045,8 @@ mt7615_mcu_wtbl_sta_add(struct mt7615_phy *phy, struct ieee80211_vif *vif,
                if (sta)
                        mt76_connac_mcu_wtbl_ht_tlv(&dev->mt76, wskb, sta,
                                                    NULL, wtbl_hdr);
-               mt76_connac_mcu_wtbl_hdr_trans_tlv(wskb, &msta->wcid, NULL,
-                                                  wtbl_hdr);
+               mt76_connac_mcu_wtbl_hdr_trans_tlv(wskb, vif, &msta->wcid,
+                                                  NULL, wtbl_hdr);
        }
 
        cmd = enable ? MCU_EXT_CMD_WTBL_UPDATE : MCU_EXT_CMD_STA_REC_UPDATE;
@@ -1058,6 +1066,26 @@ mt7615_mcu_wtbl_sta_add(struct mt7615_phy *phy, struct ieee80211_vif *vif,
        return mt76_mcu_skb_send_msg(&dev->mt76, skb, cmd, true);
 }
 
+static int
+mt7615_mcu_wtbl_update_hdr_trans(struct mt7615_dev *dev,
+                                struct ieee80211_vif *vif,
+                                struct ieee80211_sta *sta)
+{
+       struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
+       struct wtbl_req_hdr *wtbl_hdr;
+       struct sk_buff *skb = NULL;
+
+       wtbl_hdr = mt76_connac_mcu_alloc_wtbl_req(&dev->mt76, &msta->wcid,
+                                                 WTBL_SET, NULL, &skb);
+       if (IS_ERR(wtbl_hdr))
+               return PTR_ERR(wtbl_hdr);
+
+       mt76_connac_mcu_wtbl_hdr_trans_tlv(skb, vif, &msta->wcid, NULL,
+                                          wtbl_hdr);
+       return mt76_mcu_skb_send_msg(&dev->mt76, skb, MCU_EXT_CMD_WTBL_UPDATE,
+                                    true);
+}
+
 static const struct mt7615_mcu_ops wtbl_update_ops = {
        .add_beacon_offload = mt7615_mcu_add_beacon_offload,
        .set_pm_state = mt7615_mcu_ctrl_pm_state,
@@ -1068,6 +1096,7 @@ static const struct mt7615_mcu_ops wtbl_update_ops = {
        .sta_add = mt7615_mcu_wtbl_sta_add,
        .set_drv_ctrl = mt7615_mcu_drv_pmctrl,
        .set_fw_ctrl = mt7615_mcu_fw_pmctrl,
+       .set_sta_decap_offload = mt7615_mcu_wtbl_update_hdr_trans,
 };
 
 static int
@@ -1120,18 +1149,21 @@ mt7615_mcu_sta_rx_ba(struct mt7615_dev *dev,
 
 static int
 __mt7615_mcu_add_sta(struct mt76_phy *phy, struct ieee80211_vif *vif,
-                    struct ieee80211_sta *sta, bool enable, int cmd)
+                    struct ieee80211_sta *sta, bool enable, int cmd,
+                    bool offload_fw)
 {
        struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
        struct mt76_sta_cmd_info info = {
                .sta = sta,
                .vif = vif,
+               .offload_fw = offload_fw,
                .enable = enable,
+               .newly = true,
                .cmd = cmd,
        };
 
        info.wcid = sta ? (struct mt76_wcid *)sta->drv_priv : &mvif->sta.wcid;
-       return mt76_connac_mcu_add_sta_cmd(phy, &info);
+       return mt76_connac_mcu_sta_cmd(phy, &info);
 }
 
 static int
@@ -1139,7 +1171,19 @@ mt7615_mcu_add_sta(struct mt7615_phy *phy, struct ieee80211_vif *vif,
                   struct ieee80211_sta *sta, bool enable)
 {
        return __mt7615_mcu_add_sta(phy->mt76, vif, sta, enable,
-                                   MCU_EXT_CMD_STA_REC_UPDATE);
+                                   MCU_EXT_CMD_STA_REC_UPDATE, false);
+}
+
+static int
+mt7615_mcu_sta_update_hdr_trans(struct mt7615_dev *dev,
+                               struct ieee80211_vif *vif,
+                               struct ieee80211_sta *sta)
+{
+       struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
+
+       return mt76_connac_mcu_sta_update_hdr_trans(&dev->mt76,
+                                                   vif, &msta->wcid,
+                                                   MCU_EXT_CMD_STA_REC_UPDATE);
 }
 
 static const struct mt7615_mcu_ops sta_update_ops = {
@@ -1152,27 +1196,9 @@ static const struct mt7615_mcu_ops sta_update_ops = {
        .sta_add = mt7615_mcu_add_sta,
        .set_drv_ctrl = mt7615_mcu_drv_pmctrl,
        .set_fw_ctrl = mt7615_mcu_fw_pmctrl,
+       .set_sta_decap_offload = mt7615_mcu_sta_update_hdr_trans,
 };
 
-int mt7615_mcu_sta_update_hdr_trans(struct mt7615_dev *dev,
-                                   struct ieee80211_vif *vif,
-                                   struct ieee80211_sta *sta)
-{
-       struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
-       struct wtbl_req_hdr *wtbl_hdr;
-       struct sk_buff *skb = NULL;
-
-       wtbl_hdr = mt76_connac_mcu_alloc_wtbl_req(&dev->mt76, &msta->wcid,
-                                                 WTBL_SET, NULL, &skb);
-       if (IS_ERR(wtbl_hdr))
-               return PTR_ERR(wtbl_hdr);
-
-       mt76_connac_mcu_wtbl_hdr_trans_tlv(skb, &msta->wcid, NULL, wtbl_hdr);
-
-       return mt76_mcu_skb_send_msg(&dev->mt76, skb, MCU_EXT_CMD_WTBL_UPDATE,
-                                    true);
-}
-
 static int
 mt7615_mcu_uni_ctrl_pm_state(struct mt7615_dev *dev, int band, int state)
 {
@@ -1280,7 +1306,7 @@ mt7615_mcu_uni_add_sta(struct mt7615_phy *phy, struct ieee80211_vif *vif,
                       struct ieee80211_sta *sta, bool enable)
 {
        return __mt7615_mcu_add_sta(phy->mt76, vif, sta, enable,
-                                   MCU_UNI_CMD_STA_REC_UPDATE);
+                                   MCU_UNI_CMD_STA_REC_UPDATE, true);
 }
 
 static int
@@ -1338,6 +1364,18 @@ mt7615_mcu_uni_rx_ba(struct mt7615_dev *dev,
                                     MCU_UNI_CMD_STA_REC_UPDATE, true);
 }
 
+static int
+mt7615_mcu_sta_uni_update_hdr_trans(struct mt7615_dev *dev,
+                                   struct ieee80211_vif *vif,
+                                   struct ieee80211_sta *sta)
+{
+       struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
+
+       return mt76_connac_mcu_sta_update_hdr_trans(&dev->mt76,
+                                                   vif, &msta->wcid,
+                                                   MCU_UNI_CMD_STA_REC_UPDATE);
+}
+
 static const struct mt7615_mcu_ops uni_update_ops = {
        .add_beacon_offload = mt7615_mcu_uni_add_beacon_offload,
        .set_pm_state = mt7615_mcu_uni_ctrl_pm_state,
@@ -1348,6 +1386,7 @@ static const struct mt7615_mcu_ops uni_update_ops = {
        .sta_add = mt7615_mcu_uni_add_sta,
        .set_drv_ctrl = mt7615_mcu_lp_drv_pmctrl,
        .set_fw_ctrl = mt7615_mcu_fw_pmctrl,
+       .set_sta_decap_offload = mt7615_mcu_sta_uni_update_hdr_trans,
 };
 
 int mt7615_mcu_restart(struct mt76_dev *dev)
@@ -2322,14 +2361,12 @@ int mt7615_mcu_set_chan_info(struct mt7615_phy *phy, int cmd)
        return mt76_mcu_send_msg(&dev->mt76, cmd, &req, sizeof(req), true);
 }
 
-int mt7615_mcu_get_temperature(struct mt7615_dev *dev, int index)
+int mt7615_mcu_get_temperature(struct mt7615_dev *dev)
 {
        struct {
                u8 action;
                u8 rsv[3];
-       } req = {
-               .action = index,
-       };
+       } req = {};
 
        return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_GET_TEMP, &req,
                                 sizeof(req), true);
index 202ea23..71719c7 100644 (file)
@@ -229,7 +229,7 @@ int mt7615_mmio_probe(struct device *pdev, void __iomem *mem_base,
                               GFP_KERNEL);
        if (!bus_ops) {
                ret = -ENOMEM;
-               goto error;
+               goto err_free_dev;
        }
 
        bus_ops->rr = mt7615_rr;
@@ -242,17 +242,20 @@ int mt7615_mmio_probe(struct device *pdev, void __iomem *mem_base,
        ret = devm_request_irq(mdev->dev, irq, mt7615_irq_handler,
                               IRQF_SHARED, KBUILD_MODNAME, dev);
        if (ret)
-               goto error;
+               goto err_free_dev;
 
        if (is_mt7663(mdev))
                mt76_wr(dev, MT_PCIE_IRQ_ENABLE, 1);
 
        ret = mt7615_register_device(dev);
        if (ret)
-               goto error;
+               goto err_free_irq;
 
        return 0;
-error:
+
+err_free_irq:
+       devm_free_irq(pdev, irq, dev);
+err_free_dev:
        mt76_free_device(&dev->mt76);
 
        return ret;
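
The hunk above replaces the single catch-all `error:` label with a staged unwind: the IRQ requested between the two failure points is now released (`devm_free_irq()`) before the device itself is freed. A minimal sketch of the pattern with illustrative names (non-devm variants for brevity, not the driver's actual code):

#include <linux/interrupt.h>
#include <linux/slab.h>

/* Staged unwind: each label releases exactly what was acquired before
 * the failure point.  Illustrative sketch only.
 */
static irqreturn_t demo_irq(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int demo_register(void *priv)	/* stands in for a later init step */
{
	return -ENODEV;
}

static int demo_probe(int irq)
{
	void *priv;
	int ret;

	priv = kzalloc(64, GFP_KERNEL);				/* step 1 */
	if (!priv)
		return -ENOMEM;

	ret = request_irq(irq, demo_irq, IRQF_SHARED, "demo", priv); /* step 2 */
	if (ret)
		goto err_free_priv;

	ret = demo_register(priv);				/* step 3 */
	if (ret)
		goto err_free_irq;

	return 0;

err_free_irq:
	free_irq(irq, priv);		/* undo step 2 */
err_free_priv:
	kfree(priv);			/* undo step 1 */
	return ret;
}
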
index 989f05e..d0c64a9 100644 (file)
@@ -20,7 +20,6 @@
                                         MT7615_MAX_INTERFACES)
 
 #define MT7615_PM_TIMEOUT              (HZ / 12)
-#define MT7615_WATCHDOG_TIME           (HZ / 10)
 #define MT7615_HW_SCAN_TIMEOUT         (HZ / 10)
 #define MT7615_RESET_TIMEOUT           (30 * HZ)
 #define MT7615_RATE_RETRY              2
@@ -202,6 +201,7 @@ struct mt7615_phy {
 #define mt7615_mcu_set_pm(dev, ...)    (dev)->mcu_ops->set_pm_state((dev),  __VA_ARGS__)
 #define mt7615_mcu_set_drv_ctrl(dev)   (dev)->mcu_ops->set_drv_ctrl((dev))
 #define mt7615_mcu_set_fw_ctrl(dev)    (dev)->mcu_ops->set_fw_ctrl((dev))
+#define mt7615_mcu_set_sta_decap_offload(dev, ...) (dev)->mcu_ops->set_sta_decap_offload((dev), __VA_ARGS__)
 struct mt7615_mcu_ops {
        int (*add_tx_ba)(struct mt7615_dev *dev,
                         struct ieee80211_ampdu_params *params,
@@ -221,6 +221,9 @@ struct mt7615_mcu_ops {
        int (*set_pm_state)(struct mt7615_dev *dev, int band, int state);
        int (*set_drv_ctrl)(struct mt7615_dev *dev);
        int (*set_fw_ctrl)(struct mt7615_dev *dev);
+       int (*set_sta_decap_offload)(struct mt7615_dev *dev,
+                                    struct ieee80211_vif *vif,
+                                    struct ieee80211_sta *sta);
 };
 
 struct mt7615_dev {
@@ -356,6 +359,7 @@ static inline int mt7622_wmac_init(struct mt7615_dev *dev)
 }
 #endif
 
+int mt7615_thermal_init(struct mt7615_dev *dev);
 int mt7615_mmio_probe(struct device *pdev, void __iomem *mem_base,
                      int irq, const u32 *map);
 u32 mt7615_reg_map(struct mt7615_dev *dev, u32 addr);
@@ -456,6 +460,12 @@ static inline u32 mt7615_tx_mcu_int_mask(struct mt7615_dev *dev)
        return MT_INT_TX_DONE(dev->mt76.q_mcu[MT_MCUQ_WM]->hw_idx);
 }
 
+static inline unsigned long
+mt7615_get_macwork_timeout(struct mt7615_dev *dev)
+{
+       return dev->pm.enable ? HZ / 3 : HZ / 10;
+}
+
 void mt7615_dma_reset(struct mt7615_dev *dev);
 void mt7615_scan_work(struct work_struct *work);
 void mt7615_roc_work(struct work_struct *work);
@@ -466,7 +476,7 @@ int mt7615_set_channel(struct mt7615_phy *phy);
 void mt7615_init_work(struct mt7615_dev *dev);
 
 int mt7615_mcu_restart(struct mt76_dev *dev);
-void mt7615_update_channel(struct mt76_dev *mdev);
+void mt7615_update_channel(struct mt76_phy *mphy);
 bool mt7615_mac_wtbl_update(struct mt7615_dev *dev, int idx, u32 mask);
 void mt7615_mac_reset_counters(struct mt7615_dev *dev);
 void mt7615_mac_cca_stats_reset(struct mt7615_phy *phy);
@@ -494,7 +504,7 @@ u32 mt7615_rf_rr(struct mt7615_dev *dev, u32 wf, u32 reg);
 int mt7615_rf_wr(struct mt7615_dev *dev, u32 wf, u32 reg, u32 val);
 int mt7615_mcu_set_dbdc(struct mt7615_dev *dev);
 int mt7615_mcu_set_eeprom(struct mt7615_dev *dev);
-int mt7615_mcu_get_temperature(struct mt7615_dev *dev, int index);
+int mt7615_mcu_get_temperature(struct mt7615_dev *dev);
 int mt7615_mcu_set_tx_power(struct mt7615_phy *phy);
 void mt7615_mcu_exit(struct mt7615_dev *dev);
 void mt7615_mcu_fill_msg(struct mt7615_dev *dev, struct sk_buff *skb,
@@ -518,9 +528,6 @@ void mt7615_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
 void mt7615_mac_work(struct work_struct *work);
 void mt7615_txp_skb_unmap(struct mt76_dev *dev,
                          struct mt76_txwi_cache *txwi);
-int mt7615_mcu_sta_update_hdr_trans(struct mt7615_dev *dev,
-                                   struct ieee80211_vif *vif,
-                                   struct ieee80211_sta *sta);
 int mt7615_mcu_set_rx_hdr_trans_blacklist(struct mt7615_dev *dev);
 int mt7615_mcu_set_fcc5_lpn(struct mt7615_dev *dev, int val);
 int mt7615_mcu_set_pulse_th(struct mt7615_dev *dev,
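
The header changes above add a `set_sta_decap_offload` hook to `struct mt7615_mcu_ops` plus a dispatch macro, following the driver's existing pattern of hiding the legacy vs. `uni` firmware command differences behind a per-device ops table (filled in by `sta_update_ops` and `uni_update_ops` earlier in this series). A generic reduction of that indirection, with illustrative names:

/* Vtable dispatch as used by mt7615_mcu_ops; all names illustrative. */
struct demo_dev;

struct demo_mcu_ops {
	int (*set_sta_decap_offload)(struct demo_dev *dev, int wcid_idx);
};

struct demo_dev {
	const struct demo_mcu_ops *mcu_ops;
};

#define demo_mcu_set_sta_decap_offload(dev, ...) \
	(dev)->mcu_ops->set_sta_decap_offload((dev), __VA_ARGS__)

static int demo_legacy_decap(struct demo_dev *dev, int wcid_idx)
{
	return 0;	/* would send the legacy STA_REC_UPDATE command */
}

static const struct demo_mcu_ops demo_legacy_ops = {
	.set_sta_decap_offload = demo_legacy_decap,
};
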
index ec8ec1a..a2465b4 100644 (file)
@@ -98,7 +98,7 @@ mt7615_led_set_config(struct led_classdev *led_cdev,
        addr = mt7615_reg_map(dev, MT_LED_CTRL);
        mt76_wr(dev, addr, val);
 
-       mt76_connac_pm_unref(&dev->pm);
+       mt76_connac_pm_unref(&dev->mphy, &dev->pm);
 }
 
 static int
@@ -147,8 +147,12 @@ int mt7615_register_device(struct mt7615_dev *dev)
        if (ret)
                return ret;
 
-       ret = mt76_register_device(&dev->mt76, true, mt7615_rates,
-                                  ARRAY_SIZE(mt7615_rates));
+       ret = mt76_register_device(&dev->mt76, true, mt76_rates,
+                                  ARRAY_SIZE(mt76_rates));
+       if (ret)
+               return ret;
+
+       ret = mt7615_thermal_init(dev);
        if (ret)
                return ret;
 
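
`mt7615_thermal_init()`, declared in the header hunk above, runs only after `mt76_register_device()` succeeds. A hwmon sensor backed by the now parameter-less `mt7615_mcu_get_temperature()` is the natural shape for it; a hedged sketch follows, with the attribute plumbing being illustrative rather than the driver's actual code:

#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

/* Illustrative hwmon hookup; the real mt7615_thermal_init() may differ. */
static ssize_t demo_temp_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct mt7615_dev *mdev = dev_get_drvdata(dev);
	int temp = mt7615_mcu_get_temperature(mdev);

	if (temp < 0)
		return temp;

	/* hwmon reports millidegrees Celsius */
	return sprintf(buf, "%u\n", temp * 1000);
}

static SENSOR_DEVICE_ATTR_RO(temp1_input, demo_temp, 0);

static struct attribute *demo_attrs[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(demo);

int demo_thermal_init(struct mt7615_dev *dev)
{
	struct device *hwmon;

	hwmon = devm_hwmon_device_register_with_groups(dev->mt76.dev,
						       "mt7615_demo",
						       dev, demo_groups);
	return PTR_ERR_OR_ZERO(hwmon);
}
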
index d7cbef7..da87c02 100644 (file)
@@ -131,20 +131,21 @@ int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
                          struct mt76_tx_info *tx_info)
 {
        struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
-       struct mt7615_sta *msta = container_of(wcid, struct mt7615_sta, wcid);
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
        struct ieee80211_key_conf *key = info->control.hw_key;
        int pid, id;
        u8 *txwi = (u8 *)txwi_ptr;
        struct mt76_txwi_cache *t;
+       struct mt7615_sta *msta;
        void *txp;
 
+       msta = wcid ? container_of(wcid, struct mt7615_sta, wcid) : NULL;
        if (!wcid)
                wcid = &dev->mt76.global_wcid;
 
        pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
 
-       if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) {
+       if ((info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) && msta) {
                struct mt7615_phy *phy = &dev->phy;
 
                if ((info->hw_queue & MT_TX_HW_QUEUE_EXT_PHY) && mdev->phy2)
@@ -267,6 +268,7 @@ void mt7615_mac_reset_work(struct work_struct *work)
        struct mt7615_phy *phy2;
        struct mt76_phy *ext_phy;
        struct mt7615_dev *dev;
+       unsigned long timeout;
 
        dev = container_of(work, struct mt7615_dev, reset_work);
        ext_phy = dev->mt76.phy2;
@@ -344,11 +346,11 @@ void mt7615_mac_reset_work(struct work_struct *work)
 
        mt7615_mutex_release(dev);
 
+       timeout = mt7615_get_macwork_timeout(dev);
        ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mphy.mac_work,
-                                    MT7615_WATCHDOG_TIME);
+                                    timeout);
        if (phy2)
                ieee80211_queue_delayed_work(ext_phy->hw,
-                                            &phy2->mt76->mac_work,
-                                            MT7615_WATCHDOG_TIME);
+                                            &phy2->mt76->mac_work, timeout);
 
 }
index 63c081b..6712ad9 100644 (file)
@@ -463,7 +463,9 @@ enum mt7615_reg_base {
 #define MT_LPON_TCR0(_n)               MT_LPON(0x010 + ((_n) * 4))
 #define MT_LPON_TCR2(_n)               MT_LPON(0x0f8 + ((_n) - 2) * 4)
 #define MT_LPON_TCR_MODE               GENMASK(1, 0)
+#define MT_LPON_TCR_READ               GENMASK(1, 0)
 #define MT_LPON_TCR_WRITE              BIT(0)
+#define MT_LPON_TCR_ADJUST             BIT(1)
 
 #define MT_LPON_UTTR0                  MT_LPON(0x018)
 #define MT_LPON_UTTR1                  MT_LPON(0x01c)
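
The two new values name the settings of the two-bit MT_LPON_TCR_MODE field: per the definitions above, write is 0x1, adjust is 0x2, and read is 0x3 (both bits). The usb_sdio hunk further down switches the TSF-read path from `mt76_set()` to `mt76_rmw()` so the whole field is cleared before the mode is installed; a plain set would leave a previously programmed adjust bit lingering. What a masked read-modify-write such as `mt76_rmw()` computes, in isolation:

#include <linux/bits.h>
#include <linux/types.h>

/* Sketch of a masked read-modify-write, as mt76_rmw() does over MMIO. */
static u32 demo_rmw(u32 regval, u32 mask, u32 val)
{
	regval &= ~mask;	/* clear the whole field ... */
	regval |= val;		/* ... then install the new mode */
	return regval;
}

/* demo_rmw(tcr, GENMASK(1, 0), 0x3) selects TSF read mode no matter
 * which mode bits were set before; mt76_set() could only OR bits in.
 */
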
index 0518097..03877d8 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+/* SPDX-License-Identifier: ISC */
 /* Copyright (C) 2020 MediaTek Inc.
  *
  * Author: Sean Wang <sean.wang@mediatek.com>
index d1be78b..45c1cd3 100644 (file)
@@ -55,6 +55,7 @@ static int __mt7663s_mcu_drv_pmctrl(struct mt7615_dev *dev)
 {
        struct sdio_func *func = dev->mt76.sdio.func;
        struct mt76_phy *mphy = &dev->mt76.phy;
+       struct mt76_connac_pm *pm = &dev->pm;
        u32 status;
        int ret;
 
@@ -66,37 +67,45 @@ static int __mt7663s_mcu_drv_pmctrl(struct mt7615_dev *dev)
                                 status & WHLPCR_IS_DRIVER_OWN, 2000, 1000000);
        if (ret < 0) {
                dev_err(dev->mt76.dev, "Cannot get ownership from device");
-               set_bit(MT76_STATE_PM, &mphy->state);
-               sdio_release_host(func);
+       } else {
+               clear_bit(MT76_STATE_PM, &mphy->state);
 
-               return ret;
+               pm->stats.last_wake_event = jiffies;
+               pm->stats.doze_time += pm->stats.last_wake_event -
+                                      pm->stats.last_doze_event;
        }
-
        sdio_release_host(func);
-       dev->pm.last_activity = jiffies;
 
-       return 0;
+       return ret;
 }
 
 static int mt7663s_mcu_drv_pmctrl(struct mt7615_dev *dev)
 {
        struct mt76_phy *mphy = &dev->mt76.phy;
+       int ret = 0;
 
-       if (test_and_clear_bit(MT76_STATE_PM, &mphy->state))
-               return __mt7663s_mcu_drv_pmctrl(dev);
+       mutex_lock(&dev->pm.mutex);
 
-       return 0;
+       if (test_bit(MT76_STATE_PM, &mphy->state))
+               ret = __mt7663s_mcu_drv_pmctrl(dev);
+
+       mutex_unlock(&dev->pm.mutex);
+
+       return ret;
 }
 
 static int mt7663s_mcu_fw_pmctrl(struct mt7615_dev *dev)
 {
        struct sdio_func *func = dev->mt76.sdio.func;
        struct mt76_phy *mphy = &dev->mt76.phy;
+       struct mt76_connac_pm *pm = &dev->pm;
+       int ret = 0;
        u32 status;
-       int ret;
 
-       if (test_and_set_bit(MT76_STATE_PM, &mphy->state))
-               return 0;
+       mutex_lock(&pm->mutex);
+
+       if (mt76_connac_skip_fw_pmctrl(mphy, pm))
+               goto out;
 
        sdio_claim_host(func);
 
@@ -107,9 +116,15 @@ static int mt7663s_mcu_fw_pmctrl(struct mt7615_dev *dev)
        if (ret < 0) {
                dev_err(dev->mt76.dev, "Cannot set ownership to device");
                clear_bit(MT76_STATE_PM, &mphy->state);
+       } else {
+               pm->stats.last_doze_event = jiffies;
+               pm->stats.awake_time += pm->stats.last_doze_event -
+                                       pm->stats.last_wake_event;
        }
 
        sdio_release_host(func);
+out:
+       mutex_unlock(&pm->mutex);
 
        return ret;
 }
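
The SDIO ownership handlers above now serialize driver/firmware ownership flips with `pm->mutex` and account wake/doze residency from jiffies deltas: each successful transition stamps `last_wake_event` or `last_doze_event` and credits the elapsed interval to the opposite counter. The bookkeeping reduces to this sketch, with field names matching the ones in the hunk:

#include <linux/jiffies.h>

struct demo_pm_stats {
	unsigned long last_wake_event;	/* jiffies of last FW->driver flip */
	unsigned long last_doze_event;	/* jiffies of last driver->FW flip */
	unsigned long awake_time;	/* total jiffies owned by the driver */
	unsigned long doze_time;	/* total jiffies owned by firmware */
};

static void demo_mark_awake(struct demo_pm_stats *s)
{
	s->last_wake_event = jiffies;
	s->doze_time += s->last_wake_event - s->last_doze_event;
}

static void demo_mark_doze(struct demo_pm_stats *s)
{
	s->last_doze_event = jiffies;
	s->awake_time += s->last_doze_event - s->last_wake_event;
}
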
index 4393dd2..04f4c89 100644 (file)
@@ -283,9 +283,15 @@ void mt7663s_txrx_worker(struct mt76_worker *w)
 {
        struct mt76_sdio *sdio = container_of(w, struct mt76_sdio,
                                              txrx_worker);
-       struct mt76_dev *dev = container_of(sdio, struct mt76_dev, sdio);
+       struct mt76_dev *mdev = container_of(sdio, struct mt76_dev, sdio);
+       struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
        int i, nframes, ret;
 
+       if (!mt76_connac_pm_ref(&dev->mphy, &dev->pm)) {
+               queue_work(mdev->wq, &dev->pm.wake_work);
+               return;
+       }
+
        /* disable interrupt */
        sdio_claim_host(sdio->func);
        sdio_writel(sdio->func, WHLPCR_INT_EN_CLR, MCR_WHLPCR, NULL);
@@ -295,16 +301,16 @@ void mt7663s_txrx_worker(struct mt76_worker *w)
 
                /* tx */
                for (i = 0; i <= MT_TXQ_PSD; i++) {
-                       ret = mt7663s_tx_run_queue(dev, dev->phy.q_tx[i]);
+                       ret = mt7663s_tx_run_queue(mdev, mdev->phy.q_tx[i]);
                        if (ret > 0)
                                nframes += ret;
                }
-               ret = mt7663s_tx_run_queue(dev, dev->q_mcu[MT_MCUQ_WM]);
+               ret = mt7663s_tx_run_queue(mdev, mdev->q_mcu[MT_MCUQ_WM]);
                if (ret > 0)
                        nframes += ret;
 
                /* rx */
-               ret = mt7663s_rx_handler(dev);
+               ret = mt7663s_rx_handler(mdev);
                if (ret > 0)
                        nframes += ret;
        } while (nframes > 0);
@@ -312,6 +318,8 @@ void mt7663s_txrx_worker(struct mt76_worker *w)
        /* enable interrupt */
        sdio_writel(sdio->func, WHLPCR_INT_EN_SET, MCR_WHLPCR, NULL);
        sdio_release_host(sdio->func);
+
+       mt76_connac_pm_unref(&dev->mphy, &dev->pm);
 }
 
 void mt7663s_sdio_irq(struct sdio_func *func)
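
`mt7663s_txrx_worker()` now brackets all bus traffic in a runtime-PM wake reference: if the device is dozing (`mt76_connac_pm_ref()` fails), the worker queues the wake work and bails out instead of touching the SDIO host, and is rescheduled once the wake completes. The shape of that guard, with illustrative stand-ins for the mt76_connac helpers:

#include <linux/types.h>

struct demo_bus { int wake_refs; bool asleep; };

static bool demo_pm_ref(struct demo_bus *b)
{
	if (b->asleep)
		return false;		/* like mt76_connac_pm_ref() failing */
	b->wake_refs++;
	return true;
}

static void demo_pm_unref(struct demo_bus *b) { b->wake_refs--; }
static void demo_queue_wake(struct demo_bus *b) { }
static void demo_bus_io(struct demo_bus *b) { }

static void demo_txrx_worker(struct demo_bus *b)
{
	if (!demo_pm_ref(b)) {		/* dozing: don't touch the bus */
		demo_queue_wake(b);	/* wake worker will reschedule us */
		return;
	}

	demo_bus_io(b);			/* safe while the wake ref is held */

	demo_pm_unref(b);
}
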
index f8d3673..996d48c 100644 (file)
@@ -123,7 +123,7 @@ static int mt7663_usb_sdio_set_rates(struct mt7615_dev *dev,
        idx = idx > HW_BSSID_MAX ? HW_BSSID_0 : idx;
        addr = idx > 1 ? MT_LPON_TCR2(idx): MT_LPON_TCR0(idx);
 
-       mt76_set(dev, addr, MT_LPON_TCR_MODE); /* TSF read */
+       mt76_rmw(dev, addr, MT_LPON_TCR_MODE, MT_LPON_TCR_READ); /* TSF read */
        val = mt76_rr(dev, MT_LPON_UTTR0);
        sta->rate_set_tsf = (val & ~BIT(0)) | rate->rateset;
 
@@ -191,14 +191,15 @@ int mt7663_usb_sdio_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
                                   struct ieee80211_sta *sta,
                                   struct mt76_tx_info *tx_info)
 {
-       struct mt7615_sta *msta = container_of(wcid, struct mt7615_sta, wcid);
        struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
        struct sk_buff *skb = tx_info->skb;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+       struct mt7615_sta *msta;
        int pad;
 
+       msta = wcid ? container_of(wcid, struct mt7615_sta, wcid) : NULL;
        if ((info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) &&
-           !msta->rate_probe) {
+           msta && !msta->rate_probe) {
                /* request to configure sampling rate */
                spin_lock_bh(&dev->mt76.lock);
                mt7615_mac_set_rates(&dev->phy, msta, &info->control.rates[0],
@@ -323,8 +324,8 @@ int mt7663_usb_sdio_register_device(struct mt7615_dev *dev)
                        hw->max_tx_fragments = 1;
        }
 
-       err = mt76_register_device(&dev->mt76, true, mt7615_rates,
-                                  ARRAY_SIZE(mt7615_rates));
+       err = mt76_register_device(&dev->mt76, true, mt76_rates,
+                                  ARRAY_SIZE(mt76_rates));
        if (err < 0)
                return err;
 
index 6c889b9..f49d97d 100644 (file)
@@ -7,12 +7,13 @@
 #include "mt76.h"
 
 #define MT76_CONNAC_SCAN_IE_LEN                        600
-#define MT76_CONNAC_MAX_SCHED_SCAN_INTERVAL    10
+#define MT76_CONNAC_MAX_NUM_SCHED_SCAN_INTERVAL         10
+#define MT76_CONNAC_MAX_TIME_SCHED_SCAN_INTERVAL U16_MAX
 #define MT76_CONNAC_MAX_SCHED_SCAN_SSID                10
 #define MT76_CONNAC_MAX_SCAN_MATCH             16
 
 #define MT76_CONNAC_COREDUMP_TIMEOUT           (HZ / 20)
-#define MT76_CONNAC_COREDUMP_SZ                        (128 * 1024)
+#define MT76_CONNAC_COREDUMP_SZ                        (1300 * 1024)
 
 enum {
        CMD_CBW_20MHZ = IEEE80211_STA_RX_BW_20,
@@ -45,6 +46,8 @@ enum {
 
 struct mt76_connac_pm {
        bool enable;
+       bool ds_enable;
+       bool suspended;
 
        spinlock_t txq_lock;
        struct {
@@ -116,19 +119,27 @@ out:
 }
 
 static inline void
-mt76_connac_pm_unref(struct mt76_connac_pm *pm)
+mt76_connac_pm_unref(struct mt76_phy *phy, struct mt76_connac_pm *pm)
 {
        spin_lock_bh(&pm->wake.lock);
-       pm->wake.count--;
+
        pm->last_activity = jiffies;
+       if (--pm->wake.count == 0 &&
+           test_bit(MT76_STATE_MCU_RUNNING, &phy->state))
+               mt76_connac_power_save_sched(phy, pm);
+
        spin_unlock_bh(&pm->wake.lock);
 }
 
 static inline bool
 mt76_connac_skip_fw_pmctrl(struct mt76_phy *phy, struct mt76_connac_pm *pm)
 {
+       struct mt76_dev *dev = phy->dev;
        bool ret;
 
+       if (dev->token_count)
+               return true;
+
        spin_lock_bh(&pm->wake.lock);
        ret = pm->wake.count || test_and_set_bit(MT76_STATE_PM, &phy->state);
        spin_unlock_bh(&pm->wake.lock);
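
Two policy changes sit in this hunk: `mt76_connac_pm_unref()` now takes the phy so that dropping the last wake reference can arm power-save (and only once MT76_STATE_MCU_RUNNING is set), and `mt76_connac_skip_fw_pmctrl()` refuses to hand ownership back to firmware while TX tokens are outstanding, since queued frames still pin the host. An illustrative reduction of both helpers:

#include <linux/jiffies.h>
#include <linux/spinlock.h>

struct demo_pm {
	spinlock_t lock;
	int wake_count;
	unsigned long last_activity;
};

static void demo_power_save_sched(void) { }

static void demo_pm_unref(struct demo_pm *pm, bool mcu_running)
{
	spin_lock_bh(&pm->lock);
	pm->last_activity = jiffies;
	if (--pm->wake_count == 0 && mcu_running)
		demo_power_save_sched();	/* only the final unref */
	spin_unlock_bh(&pm->lock);
}

static bool demo_skip_fw_pmctrl(struct demo_pm *pm, int token_count)
{
	bool skip;

	if (token_count)	/* frames in flight still pin the host */
		return true;

	spin_lock_bh(&pm->lock);
	skip = pm->wake_count != 0;
	spin_unlock_bh(&pm->lock);
	return skip;
}
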
index 6f180c9..af43bcb 100644 (file)
@@ -10,13 +10,16 @@ int mt76_connac_pm_wake(struct mt76_phy *phy, struct mt76_connac_pm *pm)
        if (!pm->enable)
                return 0;
 
-       if (!mt76_is_mmio(dev))
+       if (mt76_is_usb(dev))
                return 0;
 
        cancel_delayed_work_sync(&pm->ps_work);
        if (!test_bit(MT76_STATE_PM, &phy->state))
                return 0;
 
+       if (pm->suspended)
+               return 0;
+
        queue_work(dev->wq, &pm->wake_work);
        if (!wait_event_timeout(pm->wait,
                                !test_bit(MT76_STATE_PM, &phy->state),
@@ -34,12 +37,15 @@ void mt76_connac_power_save_sched(struct mt76_phy *phy,
 {
        struct mt76_dev *dev = phy->dev;
 
-       if (!mt76_is_mmio(dev))
+       if (mt76_is_usb(dev))
                return;
 
        if (!pm->enable)
                return;
 
+       if (pm->suspended)
+               return;
+
        pm->last_activity = jiffies;
 
        if (!test_bit(MT76_STATE_PM, &phy->state)) {
index 6195616..5c3a81e 100644 (file)
@@ -304,7 +304,7 @@ EXPORT_SYMBOL_GPL(mt76_connac_mcu_alloc_wtbl_req);
 void mt76_connac_mcu_sta_basic_tlv(struct sk_buff *skb,
                                   struct ieee80211_vif *vif,
                                   struct ieee80211_sta *sta,
-                                  bool enable)
+                                  bool enable, bool newly)
 {
        struct sta_rec_basic *basic;
        struct tlv *tlv;
@@ -316,7 +316,8 @@ void mt76_connac_mcu_sta_basic_tlv(struct sk_buff *skb,
        basic->extra_info = cpu_to_le16(EXTRA_INFO_VER);
 
        if (enable) {
-               basic->extra_info |= cpu_to_le16(EXTRA_INFO_NEW);
+               if (newly)
+                       basic->extra_info |= cpu_to_le16(EXTRA_INFO_NEW);
                basic->conn_state = CONN_STATE_PORT_SECURE;
        } else {
                basic->conn_state = CONN_STATE_DISCONNECT;
@@ -393,6 +394,7 @@ mt76_connac_mcu_sta_uapsd(struct sk_buff *skb, struct ieee80211_vif *vif,
 }
 
 void mt76_connac_mcu_wtbl_hdr_trans_tlv(struct sk_buff *skb,
+                                       struct ieee80211_vif *vif,
                                        struct mt76_wcid *wcid,
                                        void *sta_wtbl, void *wtbl_tlv)
 {
@@ -404,9 +406,46 @@ void mt76_connac_mcu_wtbl_hdr_trans_tlv(struct sk_buff *skb,
                                             wtbl_tlv, sta_wtbl);
        htr = (struct wtbl_hdr_trans *)tlv;
        htr->no_rx_trans = !test_bit(MT_WCID_FLAG_HDR_TRANS, &wcid->flags);
+
+       if (vif->type == NL80211_IFTYPE_STATION)
+               htr->to_ds = true;
+       else
+               htr->from_ds = true;
+
+       if (test_bit(MT_WCID_FLAG_4ADDR, &wcid->flags)) {
+               htr->to_ds = true;
+               htr->from_ds = true;
+       }
 }
 EXPORT_SYMBOL_GPL(mt76_connac_mcu_wtbl_hdr_trans_tlv);
 
+int mt76_connac_mcu_sta_update_hdr_trans(struct mt76_dev *dev,
+                                        struct ieee80211_vif *vif,
+                                        struct mt76_wcid *wcid, int cmd)
+{
+       struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+       struct wtbl_req_hdr *wtbl_hdr;
+       struct tlv *sta_wtbl;
+       struct sk_buff *skb;
+
+       skb = mt76_connac_mcu_alloc_sta_req(dev, mvif, wcid);
+       if (IS_ERR(skb))
+               return PTR_ERR(skb);
+
+       sta_wtbl = mt76_connac_mcu_add_tlv(skb, STA_REC_WTBL,
+                                          sizeof(struct tlv));
+
+       wtbl_hdr = mt76_connac_mcu_alloc_wtbl_req(dev, wcid, WTBL_SET,
+                                                 sta_wtbl, &skb);
+       if (IS_ERR(wtbl_hdr))
+               return PTR_ERR(wtbl_hdr);
+
+       mt76_connac_mcu_wtbl_hdr_trans_tlv(skb, vif, wcid, sta_wtbl, wtbl_hdr);
+
+       return mt76_mcu_skb_send_msg(dev, skb, cmd, true);
+}
+EXPORT_SYMBOL_GPL(mt76_connac_mcu_sta_update_hdr_trans);
+
 void mt76_connac_mcu_wtbl_generic_tlv(struct mt76_dev *dev,
                                      struct sk_buff *skb,
                                      struct ieee80211_vif *vif,
@@ -671,7 +710,7 @@ mt76_connac_get_phy_mode_v2(struct mt76_phy *mphy, struct ieee80211_vif *vif,
 void mt76_connac_mcu_sta_tlv(struct mt76_phy *mphy, struct sk_buff *skb,
                             struct ieee80211_sta *sta,
                             struct ieee80211_vif *vif,
-                            u8 rcpi)
+                            u8 rcpi, u8 sta_state)
 {
        struct cfg80211_chan_def *chandef = &mphy->chandef;
        enum nl80211_band band = chandef->chan->band;
@@ -736,7 +775,7 @@ void mt76_connac_mcu_sta_tlv(struct mt76_phy *mphy, struct sk_buff *skb,
 
        tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_STATE, sizeof(*state));
        state = (struct sta_rec_state *)tlv;
-       state->state = 2;
+       state->state = sta_state;
 
        if (sta->vht_cap.vht_supported) {
                state->vht_opmode = sta->bandwidth;
@@ -828,8 +867,8 @@ void mt76_connac_mcu_wtbl_ht_tlv(struct mt76_dev *dev, struct sk_buff *skb,
 }
 EXPORT_SYMBOL_GPL(mt76_connac_mcu_wtbl_ht_tlv);
 
-int mt76_connac_mcu_add_sta_cmd(struct mt76_phy *phy,
-                               struct mt76_sta_cmd_info *info)
+int mt76_connac_mcu_sta_cmd(struct mt76_phy *phy,
+                           struct mt76_sta_cmd_info *info)
 {
        struct mt76_vif *mvif = (struct mt76_vif *)info->vif->drv_priv;
        struct mt76_dev *dev = phy->dev;
@@ -841,10 +880,13 @@ int mt76_connac_mcu_add_sta_cmd(struct mt76_phy *phy,
        if (IS_ERR(skb))
                return PTR_ERR(skb);
 
-       mt76_connac_mcu_sta_basic_tlv(skb, info->vif, info->sta, info->enable);
-       if (info->enable && info->sta)
-               mt76_connac_mcu_sta_tlv(phy, skb, info->sta, info->vif,
-                                       info->rcpi);
+       if (info->sta || !info->offload_fw)
+               mt76_connac_mcu_sta_basic_tlv(skb, info->vif, info->sta,
+                                             info->enable, info->newly);
+       if (info->sta && info->enable)
+               mt76_connac_mcu_sta_tlv(phy, skb, info->sta,
+                                       info->vif, info->rcpi,
+                                       info->state);
 
        sta_wtbl = mt76_connac_mcu_add_tlv(skb, STA_REC_WTBL,
                                           sizeof(struct tlv));
@@ -859,6 +901,8 @@ int mt76_connac_mcu_add_sta_cmd(struct mt76_phy *phy,
                mt76_connac_mcu_wtbl_generic_tlv(dev, skb, info->vif,
                                                 info->sta, sta_wtbl,
                                                 wtbl_hdr);
+               mt76_connac_mcu_wtbl_hdr_trans_tlv(skb, info->vif, info->wcid,
+                                                  sta_wtbl, wtbl_hdr);
                if (info->sta)
                        mt76_connac_mcu_wtbl_ht_tlv(dev, skb, info->sta,
                                                    sta_wtbl, wtbl_hdr);
@@ -866,7 +910,7 @@ int mt76_connac_mcu_add_sta_cmd(struct mt76_phy *phy,
 
        return mt76_mcu_skb_send_msg(dev, skb, info->cmd, true);
 }
-EXPORT_SYMBOL_GPL(mt76_connac_mcu_add_sta_cmd);
+EXPORT_SYMBOL_GPL(mt76_connac_mcu_sta_cmd);
 
 void mt76_connac_mcu_wtbl_ba_tlv(struct mt76_dev *dev, struct sk_buff *skb,
                                 struct ieee80211_ampdu_params *params,
@@ -895,8 +939,10 @@ void mt76_connac_mcu_wtbl_ba_tlv(struct mt76_dev *dev, struct sk_buff *skb,
                ba->rst_ba_sb = 1;
        }
 
-       if (is_mt7921(dev))
+       if (is_mt7921(dev)) {
+               ba->ba_winsize = enable ? cpu_to_le16(params->buf_size) : 0;
                return;
+       }
 
        if (enable && tx) {
                u8 ba_range[] = { 4, 8, 12, 24, 36, 48, 54, 64 };
@@ -1271,6 +1317,7 @@ int mt76_connac_mcu_uni_add_bss(struct mt76_phy *phy,
                                u8 pad[3];
                        } __packed hdr;
                        struct bss_info_uni_he he;
+                       struct bss_info_uni_bss_color bss_color;
                } he_req = {
                        .hdr = {
                                .bss_idx = mvif->idx,
@@ -1279,8 +1326,21 @@ int mt76_connac_mcu_uni_add_bss(struct mt76_phy *phy,
                                .tag = cpu_to_le16(UNI_BSS_INFO_HE_BASIC),
                                .len = cpu_to_le16(sizeof(struct bss_info_uni_he)),
                        },
+                       .bss_color = {
+                               .tag = cpu_to_le16(UNI_BSS_INFO_BSS_COLOR),
+                               .len = cpu_to_le16(sizeof(struct bss_info_uni_bss_color)),
+                               .enable = 0,
+                               .bss_color = 0,
+                       },
                };
 
+               if (enable) {
+                       he_req.bss_color.enable =
+                               vif->bss_conf.he_bss_color.enabled;
+                       he_req.bss_color.bss_color =
+                               vif->bss_conf.he_bss_color.color;
+               }
+
                mt76_connac_mcu_uni_bss_he_tlv(phy, vif,
                                               (struct tlv *)&he_req.he);
                err = mt76_mcu_send_msg(mdev, MCU_UNI_CMD_BSS_INFO_UPDATE,
@@ -1463,14 +1523,16 @@ int mt76_connac_mcu_sched_scan_req(struct mt76_phy *phy,
        req->version = 1;
        req->seq_num = mvif->scan_seq_num | ext_phy << 7;
 
-       if (is_mt7663(phy->dev) &&
-           (sreq->flags & NL80211_SCAN_FLAG_RANDOM_ADDR)) {
-               get_random_mask_addr(req->mt7663.random_mac, sreq->mac_addr,
-                                    sreq->mac_addr_mask);
+       if (sreq->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
+               u8 *addr = is_mt7663(phy->dev) ? req->mt7663.random_mac
+                                              : req->mt7921.random_mac;
+
                req->scan_func = 1;
-       } else if (is_mt7921(phy->dev)) {
-               req->mt7921.bss_idx = mvif->idx;
+               get_random_mask_addr(addr, sreq->mac_addr,
+                                    sreq->mac_addr_mask);
        }
+       if (is_mt7921(phy->dev))
+               req->mt7921.bss_idx = mvif->idx;
 
        req->ssids_num = sreq->n_ssids;
        for (i = 0; i < req->ssids_num; i++) {
@@ -1556,6 +1618,26 @@ int mt76_connac_mcu_set_deep_sleep(struct mt76_dev *dev, bool enable)
 }
 EXPORT_SYMBOL_GPL(mt76_connac_mcu_set_deep_sleep);
 
+int mt76_connac_sta_state_dp(struct mt76_dev *dev,
+                            enum ieee80211_sta_state old_state,
+                            enum ieee80211_sta_state new_state)
+{
+       if ((old_state == IEEE80211_STA_ASSOC &&
+            new_state == IEEE80211_STA_AUTHORIZED) ||
+           (old_state == IEEE80211_STA_NONE &&
+            new_state == IEEE80211_STA_NOTEXIST))
+               mt76_connac_mcu_set_deep_sleep(dev, true);
+
+       if ((old_state == IEEE80211_STA_NOTEXIST &&
+            new_state == IEEE80211_STA_NONE) ||
+           (old_state == IEEE80211_STA_AUTHORIZED &&
+            new_state == IEEE80211_STA_ASSOC))
+               mt76_connac_mcu_set_deep_sleep(dev, false);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(mt76_connac_sta_state_dp);
+
 void mt76_connac_mcu_coredump_event(struct mt76_dev *dev, struct sk_buff *skb,
                                    struct mt76_connac_coredump *coredump)
 {
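
The new `mt76_connac_sta_state_dp()` above centralizes when firmware deep-sleep may run: it is switched off while a station is being added (NOTEXIST to NONE) or demoted from AUTHORIZED to ASSOC, and switched back on once the station is authorized or fully removed. A hypothetical sketch of wiring it into a driver's mac80211 sta_state callback; only the helper call itself comes from this patch:

#include <net/mac80211.h>
#include "mt76_connac.h"

static int demo_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			  struct ieee80211_sta *sta,
			  enum ieee80211_sta_state old_state,
			  enum ieee80211_sta_state new_state)
{
	struct mt76_phy *mphy = hw->priv;	/* mt76 convention */

	/* deep-sleep is disabled while a station is being brought up and
	 * re-enabled once it is authorized or torn down completely
	 */
	mt76_connac_sta_state_dp(mphy->dev, old_state, new_state);

	return 0;	/* a real driver would continue with its own work */
}
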
@@ -1570,6 +1652,60 @@ void mt76_connac_mcu_coredump_event(struct mt76_dev *dev, struct sk_buff *skb,
 }
 EXPORT_SYMBOL_GPL(mt76_connac_mcu_coredump_event);
 
+int mt76_connac_mcu_get_nic_capability(struct mt76_phy *phy)
+{
+       struct mt76_connac_cap_hdr {
+               __le16 n_element;
+               u8 rsv[2];
+       } __packed * hdr;
+       struct sk_buff *skb;
+       int ret, i;
+
+       ret = mt76_mcu_send_and_get_msg(phy->dev, MCU_CMD_GET_NIC_CAPAB, NULL,
+                                       0, true, &skb);
+       if (ret)
+               return ret;
+
+       hdr = (struct mt76_connac_cap_hdr *)skb->data;
+       if (skb->len < sizeof(*hdr)) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       skb_pull(skb, sizeof(*hdr));
+
+       for (i = 0; i < le16_to_cpu(hdr->n_element); i++) {
+               struct tlv_hdr {
+                       __le32 type;
+                       __le32 len;
+               } __packed * tlv = (struct tlv_hdr *)skb->data;
+               int len;
+
+               if (skb->len < sizeof(*tlv))
+                       break;
+
+               skb_pull(skb, sizeof(*tlv));
+
+               len = le32_to_cpu(tlv->len);
+               if (skb->len < len)
+                       break;
+
+               switch (le32_to_cpu(tlv->type)) {
+               case MT_NIC_CAP_6G:
+                       phy->cap.has_6ghz = skb->data[0];
+                       break;
+               default:
+                       break;
+               }
+               skb_pull(skb, len);
+       }
+out:
+       dev_kfree_skb(skb);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(mt76_connac_mcu_get_nic_capability);
+
 static void
 mt76_connac_mcu_build_sku(struct mt76_dev *dev, s8 *sku,
                          struct mt76_power_limits *limits,
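
The capability event parsed by `mt76_connac_mcu_get_nic_capability()` above is a 4-byte count header followed by `n_element` type/length/value records, where each length covers only the value bytes; an MT_NIC_CAP_6G record sets `phy->cap.has_6ghz`. For reference, a one-element blob in the layout the parser walks would be encoded like this (illustrative only; the real payload comes from the MCU_CMD_GET_NIC_CAPAB event):

#include <linux/kernel.h>
#include <linux/string.h>

/* [__le16 n_element][2 rsv] then per element [__le32 type][__le32 len][val] */
static int demo_build_cap_6g(u8 *buf, size_t size, u8 has_6ghz)
{
	__le16 n = cpu_to_le16(1);
	__le32 type = cpu_to_le32(MT_NIC_CAP_6G);
	__le32 len = cpu_to_le32(1);	/* one value byte */
	u8 *p = buf;

	if (size < 4 + 8 + 1)
		return -EINVAL;

	memcpy(p, &n, 2);	p += 4;	/* n_element + 2 reserved bytes */
	memcpy(p, &type, 4);	p += 4;
	memcpy(p, &len, 4);	p += 4;
	*p = has_6ghz;

	return 0;
}
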
@@ -1632,12 +1768,15 @@ mt76_connac_mcu_rate_txpower_band(struct mt76_phy *phy,
                142, 144, 149, 151, 153, 155, 157,
                159, 161, 165
        };
+       int i, n_chan, batch_size, idx = 0, tx_power, last_ch;
        struct mt76_connac_sku_tlv sku_tlbv;
-       int i, n_chan, batch_size, idx = 0;
        struct mt76_power_limits limits;
        const u8 *ch_list;
 
        sku_len = is_mt7921(dev) ? sizeof(sku_tlbv) : sizeof(sku_tlbv) - 92;
+       tx_power = 2 * phy->hw->conf.power_level;
+       if (!tx_power)
+               tx_power = 127;
 
        if (band == NL80211_BAND_2GHZ) {
                n_chan = ARRAY_SIZE(chan_list_2ghz);
@@ -1648,39 +1787,48 @@ mt76_connac_mcu_rate_txpower_band(struct mt76_phy *phy,
        }
        batch_size = DIV_ROUND_UP(n_chan, batch_len);
 
+       if (!phy->cap.has_5ghz)
+               last_ch = chan_list_2ghz[n_chan - 1];
+       else
+               last_ch = chan_list_5ghz[n_chan - 1];
+
        for (i = 0; i < batch_size; i++) {
-               bool last_msg = i == batch_size - 1;
-               int num_ch = last_msg ? n_chan % batch_len : batch_len;
                struct mt76_connac_tx_power_limit_tlv tx_power_tlv = {
                        .band = band == NL80211_BAND_2GHZ ? 1 : 2,
-                       .n_chan = num_ch,
-                       .last_msg = last_msg,
                };
+               int j, err, msg_len, num_ch;
                struct sk_buff *skb;
-               int j, err, msg_len;
 
+               num_ch = i == batch_size - 1 ? n_chan % batch_len : batch_len;
                msg_len = sizeof(tx_power_tlv) + num_ch * sizeof(sku_tlbv);
                skb = mt76_mcu_msg_alloc(dev, NULL, msg_len);
                if (!skb)
                        return -ENOMEM;
 
+               skb_reserve(skb, sizeof(tx_power_tlv));
+
                BUILD_BUG_ON(sizeof(dev->alpha2) > sizeof(tx_power_tlv.alpha2));
                memcpy(tx_power_tlv.alpha2, dev->alpha2, sizeof(dev->alpha2));
+               tx_power_tlv.n_chan = num_ch;
 
-               skb_put_data(skb, &tx_power_tlv, sizeof(tx_power_tlv));
                for (j = 0; j < num_ch; j++, idx++) {
                        struct ieee80211_channel chan = {
                                .hw_value = ch_list[idx],
                                .band = band,
                        };
 
-                       mt76_get_rate_power_limits(phy, &chan, &limits, 127);
+                       mt76_get_rate_power_limits(phy, &chan, &limits,
+                                                  tx_power);
 
+                       tx_power_tlv.last_msg = ch_list[idx] == last_ch;
                        sku_tlbv.channel = ch_list[idx];
+
                        mt76_connac_mcu_build_sku(dev, sku_tlbv.pwr_limit,
                                                  &limits, band);
                        skb_put_data(skb, &sku_tlbv, sku_len);
                }
+               __skb_push(skb, sizeof(tx_power_tlv));
+               memcpy(skb->data, &tx_power_tlv, sizeof(tx_power_tlv));
 
                err = mt76_mcu_skb_send_msg(dev, skb,
                                            MCU_CMD_SET_RATE_TX_POWER, false);
@@ -1695,11 +1843,20 @@ int mt76_connac_mcu_set_rate_txpower(struct mt76_phy *phy)
 {
        int err;
 
-       err = mt76_connac_mcu_rate_txpower_band(phy, NL80211_BAND_2GHZ);
-       if (err < 0)
-               return err;
+       if (phy->cap.has_2ghz) {
+               err = mt76_connac_mcu_rate_txpower_band(phy,
+                                                       NL80211_BAND_2GHZ);
+               if (err < 0)
+                       return err;
+       }
+       if (phy->cap.has_5ghz) {
+               err = mt76_connac_mcu_rate_txpower_band(phy,
+                                                       NL80211_BAND_5GHZ);
+               if (err < 0)
+                       return err;
+       }
 
-       return mt76_connac_mcu_rate_txpower_band(phy, NL80211_BAND_5GHZ);
+       return 0;
 }
 EXPORT_SYMBOL_GPL(mt76_connac_mcu_set_rate_txpower);
 
@@ -1939,7 +2096,7 @@ mt76_connac_mcu_set_wow_pattern(struct mt76_dev *dev,
        ptlv->index = index;
 
        memcpy(ptlv->pattern, pattern->pattern, pattern->pattern_len);
-       memcpy(ptlv->mask, pattern->mask, pattern->pattern_len / 8);
+       memcpy(ptlv->mask, pattern->mask, DIV_ROUND_UP(pattern->pattern_len, 8));
 
        return mt76_mcu_skb_send_msg(dev, skb, MCU_UNI_CMD_SUSPEND, true);
 }
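
The one-line change above fixes a truncation bug: the WoW mask carries one bit per pattern byte, so a 12-byte pattern needs 2 mask bytes, but `pattern_len / 8` yields 1 and would drop the last four mask bits. `DIV_ROUND_UP` rounds the copy length up to whole bytes:

#include <linux/kernel.h>

/* One mask bit per pattern byte; round the byte count up. */
static size_t demo_mask_bytes(size_t pattern_len)
{
	return DIV_ROUND_UP(pattern_len, 8);	/* 12 -> 2, not 12 / 8 == 1 */
}
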
@@ -1974,14 +2131,17 @@ mt76_connac_mcu_set_wow_ctrl(struct mt76_phy *phy, struct ieee80211_vif *vif,
        };
 
        if (wowlan->magic_pkt)
-               req.wow_ctrl_tlv.trigger |= BIT(0);
+               req.wow_ctrl_tlv.trigger |= UNI_WOW_DETECT_TYPE_MAGIC;
        if (wowlan->disconnect)
-               req.wow_ctrl_tlv.trigger |= BIT(2);
+               req.wow_ctrl_tlv.trigger |= (UNI_WOW_DETECT_TYPE_DISCONNECT |
+                                            UNI_WOW_DETECT_TYPE_BCN_LOST);
        if (wowlan->nd_config) {
                mt76_connac_mcu_sched_scan_req(phy, vif, wowlan->nd_config);
-               req.wow_ctrl_tlv.trigger |= BIT(5);
+               req.wow_ctrl_tlv.trigger |= UNI_WOW_DETECT_TYPE_SCH_SCAN_HIT;
                mt76_connac_mcu_sched_scan_enable(phy, vif, suspend);
        }
+       if (wowlan->n_patterns)
+               req.wow_ctrl_tlv.trigger |= UNI_WOW_DETECT_TYPE_BITMAP;
 
        if (mt76_is_mmio(dev))
                req.wow_ctrl_tlv.wakeup_hif = WOW_PCIE;
index a109686..1c73beb 100644 (file)
@@ -559,6 +559,7 @@ enum {
        MCU_CMD_SET_RATE_TX_POWER = MCU_CE_PREFIX | 0x5d,
        MCU_CMD_SCHED_SCAN_ENABLE = MCU_CE_PREFIX | 0x61,
        MCU_CMD_SCHED_SCAN_REQ = MCU_CE_PREFIX | 0x62,
+       MCU_CMD_GET_NIC_CAPAB = MCU_CE_PREFIX | 0x8a,
        MCU_CMD_REG_WRITE = MCU_CE_PREFIX | 0xc0,
        MCU_CMD_REG_READ = MCU_CE_PREFIX | MCU_QUERY_MASK | 0xc0,
        MCU_CMD_CHIP_CONFIG = MCU_CE_PREFIX | 0xca,
@@ -575,6 +576,7 @@ enum {
 enum {
        UNI_BSS_INFO_BASIC = 0,
        UNI_BSS_INFO_RLM = 2,
+       UNI_BSS_INFO_BSS_COLOR = 4,
        UNI_BSS_INFO_HE_BASIC = 5,
        UNI_BSS_INFO_BCN_CONTENT = 7,
        UNI_BSS_INFO_QBSS = 15,
@@ -590,6 +592,36 @@ enum {
        UNI_OFFLOAD_OFFLOAD_BMC_RPY_DETECT,
 };
 
+enum {
+       MT_NIC_CAP_TX_RESOURCE,
+       MT_NIC_CAP_TX_EFUSE_ADDR,
+       MT_NIC_CAP_COEX,
+       MT_NIC_CAP_SINGLE_SKU,
+       MT_NIC_CAP_CSUM_OFFLOAD,
+       MT_NIC_CAP_HW_VER,
+       MT_NIC_CAP_SW_VER,
+       MT_NIC_CAP_MAC_ADDR,
+       MT_NIC_CAP_PHY,
+       MT_NIC_CAP_MAC,
+       MT_NIC_CAP_FRAME_BUF,
+       MT_NIC_CAP_BEAM_FORM,
+       MT_NIC_CAP_LOCATION,
+       MT_NIC_CAP_MUMIMO,
+       MT_NIC_CAP_BUFFER_MODE_INFO,
+       MT_NIC_CAP_HW_ADIE_VERSION = 0x14,
+       MT_NIC_CAP_ANTSWP = 0x16,
+       MT_NIC_CAP_WFDMA_REALLOC,
+       MT_NIC_CAP_6G,
+};
+
+#define UNI_WOW_DETECT_TYPE_MAGIC              BIT(0)
+#define UNI_WOW_DETECT_TYPE_ANY                        BIT(1)
+#define UNI_WOW_DETECT_TYPE_DISCONNECT         BIT(2)
+#define UNI_WOW_DETECT_TYPE_GTK_REKEY_FAIL     BIT(3)
+#define UNI_WOW_DETECT_TYPE_BCN_LOST           BIT(4)
+#define UNI_WOW_DETECT_TYPE_SCH_SCAN_HIT       BIT(5)
+#define UNI_WOW_DETECT_TYPE_BITMAP             BIT(6)
+
 enum {
        UNI_SUSPEND_MODE_SETTING,
        UNI_SUSPEND_WOW_CTRL,
@@ -762,7 +794,7 @@ struct mt76_connac_sched_scan_req {
        u8 intervals_num;
	u8 scan_func; /* MT7663: BIT(0) enable random mac address */
        struct mt76_connac_mcu_scan_channel channels[64];
-       __le16 intervals[MT76_CONNAC_MAX_SCHED_SCAN_INTERVAL];
+       __le16 intervals[MT76_CONNAC_MAX_NUM_SCHED_SCAN_INTERVAL];
        union {
                struct {
                        u8 random_mac[ETH_ALEN];
@@ -770,7 +802,9 @@ struct mt76_connac_sched_scan_req {
                } mt7663;
                struct {
                        u8 bss_idx;
-                       u8 pad2[63];
+                       u8 pad2[19];
+                       u8 random_mac[ETH_ALEN];
+                       u8 pad3[38];
                } mt7921;
        };
 } __packed;
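
In the union above, the mt7921 arm gains a `random_mac` field while its pads shrink so that both arms keep the same 64-byte footprint the firmware expects: 6 + 58 for mt7663, and 1 + 19 + 6 + 38 for mt7921. A compile-time check along these lines would catch a pad miscount; it is illustrative, not part of the driver:

#include <linux/build_bug.h>
#include <linux/types.h>

struct demo_mt7663_arm { u8 random_mac[6]; u8 pad1[58]; } __packed;
struct demo_mt7921_arm {
	u8 bss_idx;
	u8 pad2[19];
	u8 random_mac[6];
	u8 pad3[38];
} __packed;

static_assert(sizeof(struct demo_mt7663_arm) == 64);
static_assert(sizeof(struct demo_mt7663_arm) ==
	      sizeof(struct demo_mt7921_arm));
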
@@ -781,6 +815,14 @@ struct mt76_connac_sched_scan_done {
        __le16 pad;
 } __packed;
 
+struct bss_info_uni_bss_color {
+       __le16 tag;
+       __le16 len;
+       u8 enable;
+       u8 bss_color;
+       u8 rsv[2];
+} __packed;
+
 struct bss_info_uni_he {
        __le16 tag;
        __le16 len;
@@ -885,15 +927,24 @@ struct mt76_connac_suspend_tlv {
        u8 pad[5];
 } __packed;
 
+enum mt76_sta_info_state {
+       MT76_STA_INFO_STATE_NONE,
+       MT76_STA_INFO_STATE_AUTH,
+       MT76_STA_INFO_STATE_ASSOC
+};
+
 struct mt76_sta_cmd_info {
        struct ieee80211_sta *sta;
        struct mt76_wcid *wcid;
 
        struct ieee80211_vif *vif;
 
+       bool offload_fw;
        bool enable;
+       bool newly;
        int cmd;
        u8 rcpi;
+       u8 state;
 };
 
 #define MT_SKU_POWER_LIMIT     161
@@ -963,18 +1014,23 @@ int mt76_connac_mcu_set_channel_domain(struct mt76_phy *phy);
 int mt76_connac_mcu_set_vif_ps(struct mt76_dev *dev, struct ieee80211_vif *vif);
 void mt76_connac_mcu_sta_basic_tlv(struct sk_buff *skb,
                                   struct ieee80211_vif *vif,
-                                  struct ieee80211_sta *sta, bool enable);
+                                  struct ieee80211_sta *sta, bool enable,
+                                  bool newly);
 void mt76_connac_mcu_wtbl_generic_tlv(struct mt76_dev *dev, struct sk_buff *skb,
                                      struct ieee80211_vif *vif,
                                      struct ieee80211_sta *sta, void *sta_wtbl,
                                      void *wtbl_tlv);
 void mt76_connac_mcu_wtbl_hdr_trans_tlv(struct sk_buff *skb,
+                                       struct ieee80211_vif *vif,
                                        struct mt76_wcid *wcid,
                                        void *sta_wtbl, void *wtbl_tlv);
+int mt76_connac_mcu_sta_update_hdr_trans(struct mt76_dev *dev,
+                                        struct ieee80211_vif *vif,
+                                        struct mt76_wcid *wcid, int cmd);
 void mt76_connac_mcu_sta_tlv(struct mt76_phy *mphy, struct sk_buff *skb,
                             struct ieee80211_sta *sta,
                             struct ieee80211_vif *vif,
-                            u8 rcpi);
+                            u8 rcpi, u8 state);
 void mt76_connac_mcu_wtbl_ht_tlv(struct mt76_dev *dev, struct sk_buff *skb,
                                 struct ieee80211_sta *sta, void *sta_wtbl,
                                 void *wtbl_tlv);
@@ -996,8 +1052,8 @@ int mt76_connac_mcu_uni_add_bss(struct mt76_phy *phy,
                                struct ieee80211_vif *vif,
                                struct mt76_wcid *wcid,
                                bool enable);
-int mt76_connac_mcu_add_sta_cmd(struct mt76_phy *phy,
-                               struct mt76_sta_cmd_info *info);
+int mt76_connac_mcu_sta_cmd(struct mt76_phy *phy,
+                           struct mt76_sta_cmd_info *info);
 void mt76_connac_mcu_beacon_loss_iter(void *priv, u8 *mac,
                                      struct ieee80211_vif *vif);
 int mt76_connac_mcu_set_rts_thresh(struct mt76_dev *dev, u32 val, u8 band);
@@ -1008,6 +1064,7 @@ int mt76_connac_mcu_init_download(struct mt76_dev *dev, u32 addr, u32 len,
 int mt76_connac_mcu_start_patch(struct mt76_dev *dev);
 int mt76_connac_mcu_patch_sem_ctrl(struct mt76_dev *dev, bool get);
 int mt76_connac_mcu_start_firmware(struct mt76_dev *dev, u32 addr, u32 option);
+int mt76_connac_mcu_get_nic_capability(struct mt76_phy *phy);
 
 int mt76_connac_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif,
                            struct ieee80211_scan_request *scan_req);
@@ -1028,6 +1085,9 @@ int mt76_connac_mcu_update_gtk_rekey(struct ieee80211_hw *hw,
 int mt76_connac_mcu_set_hif_suspend(struct mt76_dev *dev, bool suspend);
 void mt76_connac_mcu_set_suspend_iter(void *priv, u8 *mac,
                                      struct ieee80211_vif *vif);
+int mt76_connac_sta_state_dp(struct mt76_dev *dev,
+                            enum ieee80211_sta_state old_state,
+                            enum ieee80211_sta_state new_state);
 int mt76_connac_mcu_chip_config(struct mt76_dev *dev);
 int mt76_connac_mcu_set_deep_sleep(struct mt76_dev *dev, bool enable);
 void mt76_connac_mcu_coredump_event(struct mt76_dev *dev, struct sk_buff *skb,
index dd66fd1..cea2421 100644 (file)
@@ -68,7 +68,7 @@ static void mt76x0_set_chip_cap(struct mt76x02_dev *dev)
                nic_conf1 &= 0xff00;
 
        if (nic_conf1 & MT_EE_NIC_CONF_1_HW_RF_CTRL)
-               dev_err(dev->mt76.dev,
+               dev_dbg(dev->mt76.dev,
                        "driver does not support HW RF ctrl\n");
 
        if (!mt76x02_field_valid(nic_conf0 >> 8))
index 0da3786..c32e6dc 100644 (file)
@@ -34,24 +34,24 @@ mt76x02_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data)
 {
        memset(key_data, 0, 32);
        if (!key)
-               return MT_CIPHER_NONE;
+               return MT76X02_CIPHER_NONE;
 
        if (key->keylen > 32)
-               return MT_CIPHER_NONE;
+               return MT76X02_CIPHER_NONE;
 
        memcpy(key_data, key->key, key->keylen);
 
        switch (key->cipher) {
        case WLAN_CIPHER_SUITE_WEP40:
-               return MT_CIPHER_WEP40;
+               return MT76X02_CIPHER_WEP40;
        case WLAN_CIPHER_SUITE_WEP104:
-               return MT_CIPHER_WEP104;
+               return MT76X02_CIPHER_WEP104;
        case WLAN_CIPHER_SUITE_TKIP:
-               return MT_CIPHER_TKIP;
+               return MT76X02_CIPHER_TKIP;
        case WLAN_CIPHER_SUITE_CCMP:
-               return MT_CIPHER_AES_CCMP;
+               return MT76X02_CIPHER_AES_CCMP;
        default:
-               return MT_CIPHER_NONE;
+               return MT76X02_CIPHER_NONE;
        }
 }
 
@@ -63,7 +63,7 @@ int mt76x02_mac_shared_key_setup(struct mt76x02_dev *dev, u8 vif_idx,
        u32 val;
 
        cipher = mt76x02_mac_get_key_info(key, key_data);
-       if (cipher == MT_CIPHER_NONE && key)
+       if (cipher == MT76X02_CIPHER_NONE && key)
                return -EOPNOTSUPP;
 
        val = mt76_rr(dev, MT_SKEY_MODE(vif_idx));
@@ -91,10 +91,10 @@ void mt76x02_mac_wcid_sync_pn(struct mt76x02_dev *dev, u8 idx,
        eiv = mt76_rr(dev, MT_WCID_IV(idx) + 4);
 
        pn = (u64)eiv << 16;
-       if (cipher == MT_CIPHER_TKIP) {
+       if (cipher == MT76X02_CIPHER_TKIP) {
                pn |= (iv >> 16) & 0xff;
                pn |= (iv & 0xff) << 8;
-       } else if (cipher >= MT_CIPHER_AES_CCMP) {
+       } else if (cipher >= MT76X02_CIPHER_AES_CCMP) {
                pn |= iv & 0xffff;
        } else {
                return;
@@ -112,7 +112,7 @@ int mt76x02_mac_wcid_set_key(struct mt76x02_dev *dev, u8 idx,
        u64 pn;
 
        cipher = mt76x02_mac_get_key_info(key, key_data);
-       if (cipher == MT_CIPHER_NONE && key)
+       if (cipher == MT76X02_CIPHER_NONE && key)
                return -EOPNOTSUPP;
 
        mt76_wr_copy(dev, MT_WCID_KEY(idx), key_data, sizeof(key_data));
@@ -126,16 +126,16 @@ int mt76x02_mac_wcid_set_key(struct mt76x02_dev *dev, u8 idx,
                pn = atomic64_read(&key->tx_pn);
 
                iv_data[3] = key->keyidx << 6;
-               if (cipher >= MT_CIPHER_TKIP) {
+               if (cipher >= MT76X02_CIPHER_TKIP) {
                        iv_data[3] |= 0x20;
                        put_unaligned_le32(pn >> 16, &iv_data[4]);
                }
 
-               if (cipher == MT_CIPHER_TKIP) {
+               if (cipher == MT76X02_CIPHER_TKIP) {
                        iv_data[0] = (pn >> 8) & 0xff;
                        iv_data[1] = (iv_data[0] | 0x20) & 0x7f;
                        iv_data[2] = pn & 0xff;
-               } else if (cipher >= MT_CIPHER_AES_CCMP) {
+               } else if (cipher >= MT76X02_CIPHER_AES_CCMP) {
                        put_unaligned_le16((pn & 0xffff), &iv_data[0]);
                }
        }
@@ -1022,12 +1022,12 @@ void mt76x02_mac_set_tx_protection(struct mt76x02_dev *dev, bool legacy_prot,
                mt76_wr(dev, MT_TX_PROT_CFG6 + i * 4, vht_prot[i]);
 }
 
-void mt76x02_update_channel(struct mt76_dev *mdev)
+void mt76x02_update_channel(struct mt76_phy *mphy)
 {
-       struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
+       struct mt76x02_dev *dev = container_of(mphy->dev, struct mt76x02_dev, mt76);
        struct mt76_channel_state *state;
 
-       state = mdev->phy.chan_state;
+       state = mphy->chan_state;
        state->cc_busy += mt76_rr(dev, MT_CH_BUSY);
 
        spin_lock_bh(&dev->mt76.cc_lock);
@@ -1169,7 +1169,7 @@ void mt76x02_mac_work(struct work_struct *work)
 
        mutex_lock(&dev->mt76.mutex);
 
-       mt76_update_survey(&dev->mt76);
+       mt76_update_survey(&dev->mphy);
        for (i = 0, idx = 0; i < 16; i++) {
                u32 val = mt76_rr(dev, MT_TX_AGG_CNT(i));
 
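
`mt76x02_update_channel()` (like `mt7615_update_channel()` in the header hunk earlier) now takes the `mt76_phy` rather than the `mt76_dev`, matching the multi-phy drivers; the driver-private structure is still reachable because the phy carries a pointer to the shared core, which is embedded in the driver struct. The `container_of()` step in isolation, as a generic sketch:

#include <linux/kernel.h>

struct demo_core { int id; };
struct demo_phy { struct demo_core *core; };
struct demo_drv {
	struct demo_core core;	/* embedded shared core */
	int priv_state;
};

static struct demo_drv *demo_from_phy(struct demo_phy *phy)
{
	/* phy->core points into some demo_drv; recover the container */
	return container_of(phy->core, struct demo_drv, core);
}
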
index 0cfbaca..5dc6c83 100644 (file)
@@ -195,7 +195,7 @@ void mt76x02_mac_write_txwi(struct mt76x02_dev *dev, struct mt76x02_txwi *txwi,
                            struct ieee80211_sta *sta, int len);
 void mt76x02_mac_poll_tx_status(struct mt76x02_dev *dev, bool irq);
 void mt76x02_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e);
-void mt76x02_update_channel(struct mt76_dev *mdev);
+void mt76x02_update_channel(struct mt76_phy *mphy);
 void mt76x02_mac_work(struct work_struct *work);
 
 void mt76x02_mac_cc_reset(struct mt76x02_dev *dev);
index 3e72227..fa7872a 100644 (file)
@@ -692,15 +692,15 @@ struct mt76_wcid_key {
 } __packed __aligned(4);
 
 enum mt76x02_cipher_type {
-       MT_CIPHER_NONE,
-       MT_CIPHER_WEP40,
-       MT_CIPHER_WEP104,
-       MT_CIPHER_TKIP,
-       MT_CIPHER_AES_CCMP,
-       MT_CIPHER_CKIP40,
-       MT_CIPHER_CKIP104,
-       MT_CIPHER_CKIP128,
-       MT_CIPHER_WAPI,
+       MT76X02_CIPHER_NONE,
+       MT76X02_CIPHER_WEP40,
+       MT76X02_CIPHER_WEP104,
+       MT76X02_CIPHER_TKIP,
+       MT76X02_CIPHER_AES_CCMP,
+       MT76X02_CIPHER_CKIP40,
+       MT76X02_CIPHER_CKIP104,
+       MT76X02_CIPHER_CKIP128,
+       MT76X02_CIPHER_WAPI,
 };
 
 #endif
index 02db5d6..ccdbab3 100644 (file)
@@ -7,24 +7,18 @@
 #include <linux/module.h>
 #include "mt76x02.h"
 
-#define CCK_RATE(_idx, _rate) {                                        \
+#define MT76x02_CCK_RATE(_idx, _rate) {                                        \
        .bitrate = _rate,                                       \
        .flags = IEEE80211_RATE_SHORT_PREAMBLE,                 \
        .hw_value = (MT_PHY_TYPE_CCK << 8) | (_idx),            \
        .hw_value_short = (MT_PHY_TYPE_CCK << 8) | (8 + (_idx)),        \
 }
 
-#define OFDM_RATE(_idx, _rate) {                               \
-       .bitrate = _rate,                                       \
-       .hw_value = (MT_PHY_TYPE_OFDM << 8) | (_idx),           \
-       .hw_value_short = (MT_PHY_TYPE_OFDM << 8) | (_idx),     \
-}
-
 struct ieee80211_rate mt76x02_rates[] = {
-       CCK_RATE(0, 10),
-       CCK_RATE(1, 20),
-       CCK_RATE(2, 55),
-       CCK_RATE(3, 110),
+       MT76x02_CCK_RATE(0, 10),
+       MT76x02_CCK_RATE(1, 20),
+       MT76x02_CCK_RATE(2, 55),
+       MT76x02_CCK_RATE(3, 110),
        OFDM_RATE(0, 60),
        OFDM_RATE(1, 90),
        OFDM_RATE(2, 120),
index 40c8061..80e4924 100644 (file)
@@ -1,4 +1,4 @@
-#SPDX-License-Identifier: ISC
+# SPDX-License-Identifier: ISC
 
 obj-$(CONFIG_MT7915E) += mt7915e.o
 
index 6a8ddee..6404824 100644 (file)
@@ -3,6 +3,7 @@
 
 #include "mt7915.h"
 #include "eeprom.h"
+#include "mcu.h"
 
 /** global debugfs **/
 
@@ -16,7 +17,7 @@ mt7915_implicit_txbf_set(void *data, u64 val)
 
        dev->ibf = !!val;
 
-       return mt7915_mcu_set_txbf_type(dev);
+       return mt7915_mcu_set_txbf(dev, MT_BF_TYPE_UPDATE);
 }
 
 static int
@@ -147,6 +148,9 @@ mt7915_txbf_stat_read_phy(struct mt7915_phy *phy, struct seq_file *s)
 {
        struct mt7915_dev *dev = s->private;
        bool ext_phy = phy != &dev->phy;
+       static const char * const bw[] = {
+               "BW20", "BW40", "BW80", "BW160"
+       };
        int cnt;
 
        if (!phy)
@@ -164,11 +168,16 @@ mt7915_txbf_stat_read_phy(struct mt7915_phy *phy, struct seq_file *s)
        seq_puts(s, "Tx Beamformer Rx feedback statistics: ");
 
        cnt = mt76_rr(dev, MT_ETBF_RX_FB_CNT(ext_phy));
-       seq_printf(s, "All: %ld, HE: %ld, VHT: %ld, HT: %ld\n",
+       seq_printf(s, "All: %ld, HE: %ld, VHT: %ld, HT: %ld",
                   FIELD_GET(MT_ETBF_RX_FB_ALL, cnt),
                   FIELD_GET(MT_ETBF_RX_FB_HE, cnt),
                   FIELD_GET(MT_ETBF_RX_FB_VHT, cnt),
                   FIELD_GET(MT_ETBF_RX_FB_HT, cnt));
+       cnt = mt76_rr(dev, MT_ETBF_RX_FB_CONT(ext_phy));
+       seq_printf(s, "%s, NC: %ld, NR: %ld\n",
+                  bw[FIELD_GET(MT_ETBF_RX_FB_BW, cnt)],
+                  FIELD_GET(MT_ETBF_RX_FB_NC, cnt),
+                  FIELD_GET(MT_ETBF_RX_FB_NR, cnt));
 
        /* Tx Beamformee Rx NDPA & Tx feedback report */
        cnt = mt76_rr(dev, MT_ETBF_TX_NDP_BFRP(ext_phy));
@@ -204,7 +213,7 @@ mt7915_tx_stats_show(struct seq_file *file, void *data)
        mt7915_txbf_stat_read_phy(mt7915_ext_phy(dev), file);
 
        /* Tx amsdu info */
-       seq_puts(file, "Tx MSDU stat:\n");
+       seq_puts(file, "Tx MSDU statistics:\n");
        for (i = 0, n = 0; i < ARRAY_SIZE(stat); i++) {
                stat[i] = mt76_rr(dev,  MT_PLE_AMSDU_PACK_MSDU_CNT(i));
                n += stat[i];
@@ -224,18 +233,6 @@ mt7915_tx_stats_show(struct seq_file *file, void *data)
 
 DEFINE_SHOW_ATTRIBUTE(mt7915_tx_stats);
 
-static int mt7915_read_temperature(struct seq_file *s, void *data)
-{
-       struct mt7915_dev *dev = dev_get_drvdata(s->private);
-       int temp;
-
-       /* cpu */
-       temp = mt7915_mcu_get_temperature(dev, 0);
-       seq_printf(s, "Temperature: %d\n", temp);
-
-       return 0;
-}
-
 static int
 mt7915_queues_acq(struct seq_file *s, void *data)
 {
@@ -307,54 +304,23 @@ mt7915_puts_rate_txpower(struct seq_file *s, struct mt7915_phy *phy)
                "RU26", "RU52", "RU106", "RU242/SU20",
                "RU484/SU40", "RU996/SU80", "RU2x996/SU160"
        };
-       struct mt7915_dev *dev = dev_get_drvdata(s->private);
-       bool ext_phy = phy != &dev->phy;
-       u32 reg_base;
-       int i, idx = 0;
+       s8 txpower[MT7915_SKU_RATE_NUM], *buf;
+       int i;
 
        if (!phy)
                return;
 
-       reg_base = MT_TMAC_FP0R0(ext_phy);
-       seq_printf(s, "\nBand %d\n", ext_phy);
+       seq_printf(s, "\nBand %d\n", phy != &phy->dev->phy);
 
-       for (i = 0; i < ARRAY_SIZE(mt7915_sku_group_len); i++) {
-               u8 cnt, mcs_num = mt7915_sku_group_len[i];
-               s8 txpower[12];
-               int j;
+       mt7915_mcu_get_txpower_sku(phy, txpower, sizeof(txpower));
+       for (i = 0, buf = txpower; i < ARRAY_SIZE(mt7915_sku_group_len); i++) {
+               u8 mcs_num = mt7915_sku_group_len[i];
 
-               if (i == SKU_HT_BW20 || i == SKU_HT_BW40) {
-                       mcs_num = 8;
-               } else if (i >= SKU_VHT_BW20 && i <= SKU_VHT_BW160) {
+               if (i >= SKU_VHT_BW20 && i <= SKU_VHT_BW160)
                        mcs_num = 10;
-               } else if (i == SKU_HE_RU26) {
-                       reg_base = MT_TMAC_FP0R18(ext_phy);
-                       idx = 0;
-               }
-
-               for (j = 0, cnt = 0; j < DIV_ROUND_UP(mcs_num, 4); j++) {
-                       u32 val;
-
-                       if (i == SKU_VHT_BW160 && idx == 60) {
-                               reg_base = MT_TMAC_FP0R15(ext_phy);
-                               idx = 0;
-                       }
-
-                       val = mt76_rr(dev, reg_base + (idx / 4) * 4);
-
-                       if (idx && idx % 4)
-                               val >>= (idx % 4) * 8;
-
-                       while (val > 0 && cnt < mcs_num) {
-                               s8 pwr = FIELD_GET(MT_TMAC_FP_MASK, val);
-
-                               txpower[cnt++] = pwr;
-                               val >>= 8;
-                               idx++;
-                       }
-               }
 
-               mt76_seq_puts_array(s, sku_group_name[i], txpower, mcs_num);
+               mt76_seq_puts_array(s, sku_group_name[i], buf, mcs_num);
+               buf += mt7915_sku_group_len[i];
        }
 }
 
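
The debugfs dump above no longer scrapes TX power registers; it queries the firmware once via `mt7915_mcu_get_txpower_sku()` and then slices the flat array per rate group, advancing `buf` by each group's full stored length even when fewer entries are printed (the VHT groups print 10 of their stored entries). The walk relies on the group lengths summing to the array size; roughly:

/* The flat SKU array is consumed group by group; the walk only works if
 * the per-group lengths add up to the total.  Illustrative check.
 */
static bool demo_sku_lens_consistent(const u8 *group_len, int n_groups,
				     int total)
{
	int i, sum = 0;

	for (i = 0; i < n_groups; i++)
		sum += group_len[i];

	return sum == total;	/* e.g. sum(mt7915_sku_group_len) ==
				 * MT7915_SKU_RATE_NUM */
}
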
@@ -390,8 +356,6 @@ int mt7915_init_debugfs(struct mt7915_dev *dev)
        debugfs_create_file("radar_trigger", 0200, dir, dev,
                            &fops_radar_trigger);
        debugfs_create_file("ser_trigger", 0200, dir, dev, &fops_ser_trigger);
-       debugfs_create_devm_seqfile(dev->mt76.dev, "temperature", dir,
-                                   mt7915_read_temperature);
        debugfs_create_devm_seqfile(dev->mt76.dev, "txpower_sku", dir,
                                    mt7915_read_rate_txpower);
 
index 11d0b76..9182568 100644 (file)
@@ -19,39 +19,6 @@ int mt7915_init_tx_queues(struct mt7915_phy *phy, int idx, int n_desc)
        return 0;
 }
 
-void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
-                        struct sk_buff *skb)
-{
-       struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
-       __le32 *rxd = (__le32 *)skb->data;
-       enum rx_pkt_type type;
-
-       type = FIELD_GET(MT_RXD0_PKT_TYPE, le32_to_cpu(rxd[0]));
-
-       switch (type) {
-       case PKT_TYPE_TXRX_NOTIFY:
-               mt7915_mac_tx_free(dev, skb);
-               break;
-       case PKT_TYPE_RX_EVENT:
-               mt7915_mcu_rx_event(dev, skb);
-               break;
-#ifdef CONFIG_NL80211_TESTMODE
-       case PKT_TYPE_TXRXV:
-               mt7915_mac_fill_rx_vector(dev, skb);
-               break;
-#endif
-       case PKT_TYPE_NORMAL:
-               if (!mt7915_mac_fill_rx(dev, skb)) {
-                       mt76_rx(&dev->mt76, q, skb);
-                       return;
-               }
-               fallthrough;
-       default:
-               dev_kfree_skb(skb);
-               break;
-       }
-}
-
 static void
 mt7915_tx_cleanup(struct mt7915_dev *dev)
 {
@@ -112,8 +79,6 @@ void mt7915_dma_prefetch(struct mt7915_dev *dev)
 
 int mt7915_dma_init(struct mt7915_dev *dev)
 {
-       /* Increase buffer size to receive large VHT/HE MPDUs */
-       int rx_buf_size = MT_RX_BUF_SIZE * 2;
        u32 hif1_ofs = 0;
        int ret;
 
@@ -177,28 +142,28 @@ int mt7915_dma_init(struct mt7915_dev *dev)
        /* event from WM */
        ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU],
                               MT7915_RXQ_MCU_WM, MT7915_RX_MCU_RING_SIZE,
-                              rx_buf_size, MT_RX_EVENT_RING_BASE);
+                              MT_RX_BUF_SIZE, MT_RX_EVENT_RING_BASE);
        if (ret)
                return ret;
 
        /* event from WA */
        ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU_WA],
                               MT7915_RXQ_MCU_WA, MT7915_RX_MCU_RING_SIZE,
-                              rx_buf_size, MT_RX_EVENT_RING_BASE);
+                              MT_RX_BUF_SIZE, MT_RX_EVENT_RING_BASE);
        if (ret)
                return ret;
 
        /* rx data queue */
        ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN],
                               MT7915_RXQ_BAND0, MT7915_RX_RING_SIZE,
-                              rx_buf_size, MT_RX_DATA_RING_BASE);
+                              MT_RX_BUF_SIZE, MT_RX_DATA_RING_BASE);
        if (ret)
                return ret;
 
        if (dev->dbdc_support) {
                ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_EXT],
                                       MT7915_RXQ_BAND1, MT7915_RX_RING_SIZE,
-                                      rx_buf_size,
+                                      MT_RX_BUF_SIZE,
                                       MT_RX_DATA_RING_BASE + hif1_ofs);
                if (ret)
                        return ret;
@@ -207,7 +172,7 @@ int mt7915_dma_init(struct mt7915_dev *dev)
                ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_EXT_WA],
                                       MT7915_RXQ_MCU_WA_EXT,
                                       MT7915_RX_MCU_RING_SIZE,
-                                      rx_buf_size,
+                                      MT_RX_BUF_SIZE,
                                       MT_RX_EVENT_RING_BASE + hif1_ofs);
                if (ret)
                        return ret;
index 8ededf2..ee3d644 100644 (file)
@@ -4,22 +4,12 @@
 #include "mt7915.h"
 #include "eeprom.h"
 
-static u32 mt7915_eeprom_read(struct mt7915_dev *dev, u32 offset)
-{
-       u8 *data = dev->mt76.eeprom.data;
-
-       if (data[offset] == 0xff && !dev->flash_mode)
-               mt7915_mcu_get_eeprom(dev, offset);
-
-       return data[offset];
-}
-
 static int mt7915_eeprom_load_precal(struct mt7915_dev *dev)
 {
        struct mt76_dev *mdev = &dev->mt76;
-       u32 val;
+       u8 *eeprom = mdev->eeprom.data;
+       u32 val = eeprom[MT_EE_DO_PRE_CAL];
 
-       val = mt7915_eeprom_read(dev, MT_EE_DO_PRE_CAL);
        if (val != (MT_EE_WIFI_CAL_DPD | MT_EE_WIFI_CAL_GROUP))
                return 0;
 
@@ -43,7 +33,13 @@ static int mt7915_eeprom_load(struct mt7915_dev *dev)
                dev->flash_mode = true;
                ret = mt7915_eeprom_load_precal(dev);
        } else {
-               memset(dev->mt76.eeprom.data, -1, MT7915_EEPROM_SIZE);
+               u32 block_num, i;
+
+               block_num = DIV_ROUND_UP(MT7915_EEPROM_SIZE,
+                                        MT7915_EEPROM_BLOCK_SIZE);
+               for (i = 0; i < block_num; i++)
+                       mt7915_mcu_get_eeprom(dev,
+                                             i * MT7915_EEPROM_BLOCK_SIZE);
        }
 
        return ret;
@@ -52,10 +48,7 @@ static int mt7915_eeprom_load(struct mt7915_dev *dev)
 static int mt7915_check_eeprom(struct mt7915_dev *dev)
 {
        u8 *eeprom = dev->mt76.eeprom.data;
-       u16 val;
-
-       mt7915_eeprom_read(dev, MT_EE_CHIP_ID);
-       val = get_unaligned_le16(eeprom);
+       u16 val = get_unaligned_le16(eeprom);
 
        switch (val) {
        case 0x7915:
@@ -69,9 +62,10 @@ void mt7915_eeprom_parse_band_config(struct mt7915_phy *phy)
 {
        struct mt7915_dev *dev = phy->dev;
        bool ext_phy = phy != &dev->phy;
+       u8 *eeprom = dev->mt76.eeprom.data;
        u32 val;
 
-       val = mt7915_eeprom_read(dev, MT_EE_WIFI_CONF + ext_phy);
+       val = eeprom[MT_EE_WIFI_CONF + ext_phy];
        val = FIELD_GET(MT_EE_WIFI_CONF0_BAND_SEL, val);
        if (val == MT_EE_BAND_SEL_DEFAULT && dev->dbdc_support)
                val = ext_phy ? MT_EE_BAND_SEL_5GHZ : MT_EE_BAND_SEL_2GHZ;
@@ -143,6 +137,7 @@ int mt7915_eeprom_get_target_power(struct mt7915_dev *dev,
                                   struct ieee80211_channel *chan,
                                   u8 chain_idx)
 {
+       u8 *eeprom = dev->mt76.eeprom.data;
        int index, target_power;
        bool tssi_on;
 
@@ -153,18 +148,18 @@ int mt7915_eeprom_get_target_power(struct mt7915_dev *dev,
 
        if (chan->band == NL80211_BAND_2GHZ) {
                index = MT_EE_TX0_POWER_2G + chain_idx * 3;
-               target_power = mt7915_eeprom_read(dev, index);
+               target_power = eeprom[index];
 
                if (!tssi_on)
-                       target_power += mt7915_eeprom_read(dev, index + 1);
+                       target_power += eeprom[index + 1];
        } else {
                int group = mt7915_get_channel_group(chan->hw_value);
 
                index = MT_EE_TX0_POWER_5G + chain_idx * 12;
-               target_power = mt7915_eeprom_read(dev, index + group);
+               target_power = eeprom[index + group];
 
                if (!tssi_on)
-                       target_power += mt7915_eeprom_read(dev, index + 8);
+                       target_power += eeprom[index + 8];
        }
 
        return target_power;
@@ -172,13 +167,14 @@ int mt7915_eeprom_get_target_power(struct mt7915_dev *dev,
 
 s8 mt7915_eeprom_get_power_delta(struct mt7915_dev *dev, int band)
 {
+       u8 *eeprom = dev->mt76.eeprom.data;
        u32 val;
        s8 delta;
 
        if (band == NL80211_BAND_2GHZ)
-               val = mt7915_eeprom_read(dev, MT_EE_RATE_DELTA_2G);
+               val = eeprom[MT_EE_RATE_DELTA_2G];
        else
-               val = mt7915_eeprom_read(dev, MT_EE_RATE_DELTA_5G);
+               val = eeprom[MT_EE_RATE_DELTA_5G];
 
        if (!(val & MT_EE_RATE_DELTA_EN))
                return 0;
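
With mt7915_eeprom_read() removed, the load path above pulls the whole EEPROM from the MCU in fixed-size blocks, using DIV_ROUND_UP so a trailing partial block is still fetched. A standalone sketch of the rounding (sizes are illustrative; the real constants are MT7915_EEPROM_SIZE and MT7915_EEPROM_BLOCK_SIZE):

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        /* Illustrative sizes, not the driver's actual values. */
        unsigned int eeprom_size = 3584;
        unsigned int block_size = 16;
        unsigned int block_num = DIV_ROUND_UP(eeprom_size, block_size);

        /* Each request fetches one block at offset i * block_size;
         * rounding up ensures a trailing partial block is still read.
         */
        for (unsigned int i = 0; i < block_num; i++)
            printf("fetch block at offset %u\n", i * block_size);
        return 0;
    }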
index 033fb59..a43389a 100644 (file)
@@ -33,7 +33,7 @@ enum mt7915_eeprom_field {
 #define MT_EE_WIFI_CAL_GROUP                   BIT(0)
 #define MT_EE_WIFI_CAL_DPD                     GENMASK(2, 1)
 #define MT_EE_CAL_UNIT                         1024
-#define MT_EE_CAL_GROUP_SIZE                   (44 * MT_EE_CAL_UNIT)
+#define MT_EE_CAL_GROUP_SIZE                   (49 * MT_EE_CAL_UNIT + 16)
 #define MT_EE_CAL_DPD_SIZE                     (54 * MT_EE_CAL_UNIT)
 
 #define MT_EE_WIFI_CONF0_TX_PATH               GENMASK(2, 0)
@@ -99,12 +99,15 @@ static inline bool
 mt7915_tssi_enabled(struct mt7915_dev *dev, enum nl80211_band band)
 {
        u8 *eep = dev->mt76.eeprom.data;
+       u8 val = eep[MT_EE_WIFI_CONF + 7];
 
-       /* TODO: DBDC */
-       if (band == NL80211_BAND_5GHZ)
-               return eep[MT_EE_WIFI_CONF + 7] & MT_EE_WIFI_CONF7_TSSI0_5G;
+       if (band == NL80211_BAND_2GHZ)
+               return val & MT_EE_WIFI_CONF7_TSSI0_2G;
+
+       if (dev->dbdc_support)
+               return val & MT_EE_WIFI_CONF7_TSSI1_5G;
        else
-               return eep[MT_EE_WIFI_CONF + 7] & MT_EE_WIFI_CONF7_TSSI0_2G;
+               return val & MT_EE_WIFI_CONF7_TSSI0_5G;
 }
 
 extern const u8 mt7915_sku_group_len[MAX_SKU_RATE_GROUP_NUM];
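
The reworked mt7915_tssi_enabled() above picks one of three EEPROM bits: the 2 GHz bit, or either the second-band or first-band 5 GHz bit depending on DBDC. A standalone sketch of the selection (the bit values are stand-ins, not the real MT_EE_WIFI_CONF7_* masks):

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-in bits; the real masks are MT_EE_WIFI_CONF7_TSSI0_2G,
     * MT_EE_WIFI_CONF7_TSSI0_5G and MT_EE_WIFI_CONF7_TSSI1_5G.
     */
    #define TSSI0_2G 0x01
    #define TSSI0_5G 0x02
    #define TSSI1_5G 0x04

    static bool tssi_enabled(unsigned char val, bool band_2g, bool dbdc)
    {
        if (band_2g)
            return val & TSSI0_2G;
        /* on DBDC parts the 5 GHz band is the second PHY, with its own bit */
        return val & (dbdc ? TSSI1_5G : TSSI0_5G);
    }

    int main(void)
    {
        printf("2G:        %d\n", tssi_enabled(0x01, true, false));
        printf("5G (dbdc): %d\n", tssi_enabled(0x04, false, true));
        printf("5G:        %d\n", tssi_enabled(0x02, false, false));
        return 0;
    }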
index 822f3aa..4798d63 100644 (file)
@@ -2,39 +2,14 @@
 /* Copyright (C) 2020 MediaTek Inc. */
 
 #include <linux/etherdevice.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/thermal.h>
 #include "mt7915.h"
 #include "mac.h"
 #include "mcu.h"
 #include "eeprom.h"
 
-#define CCK_RATE(_idx, _rate) {                                                \
-       .bitrate = _rate,                                               \
-       .flags = IEEE80211_RATE_SHORT_PREAMBLE,                         \
-       .hw_value = (MT_PHY_TYPE_CCK << 8) | (_idx),                    \
-       .hw_value_short = (MT_PHY_TYPE_CCK << 8) | (4 + (_idx)),        \
-}
-
-#define OFDM_RATE(_idx, _rate) {                                       \
-       .bitrate = _rate,                                               \
-       .hw_value = (MT_PHY_TYPE_OFDM << 8) | (_idx),                   \
-       .hw_value_short = (MT_PHY_TYPE_OFDM << 8) | (_idx),             \
-}
-
-static struct ieee80211_rate mt7915_rates[] = {
-       CCK_RATE(0, 10),
-       CCK_RATE(1, 20),
-       CCK_RATE(2, 55),
-       CCK_RATE(3, 110),
-       OFDM_RATE(11, 60),
-       OFDM_RATE(15, 90),
-       OFDM_RATE(10, 120),
-       OFDM_RATE(14, 180),
-       OFDM_RATE(9,  240),
-       OFDM_RATE(13, 360),
-       OFDM_RATE(8,  480),
-       OFDM_RATE(12, 540),
-};
-
 static const struct ieee80211_iface_limit if_limits[] = {
        {
                .max = 1,
@@ -67,6 +42,117 @@ static const struct ieee80211_iface_combination if_comb[] = {
        }
 };
 
+static ssize_t mt7915_thermal_show_temp(struct device *dev,
+                                       struct device_attribute *attr,
+                                       char *buf)
+{
+       struct mt7915_phy *phy = dev_get_drvdata(dev);
+       int temperature;
+
+       temperature = mt7915_mcu_get_temperature(phy);
+       if (temperature < 0)
+               return temperature;
+
+       /* display in millidegree Celsius */
+       return sprintf(buf, "%u\n", temperature * 1000);
+}
+
+static SENSOR_DEVICE_ATTR(temp1_input, 0444, mt7915_thermal_show_temp,
+                         NULL, 0);
+
+static struct attribute *mt7915_hwmon_attrs[] = {
+       &sensor_dev_attr_temp1_input.dev_attr.attr,
+       NULL,
+};
+ATTRIBUTE_GROUPS(mt7915_hwmon);
+
+static int
+mt7915_thermal_get_max_throttle_state(struct thermal_cooling_device *cdev,
+                                     unsigned long *state)
+{
+       *state = MT7915_THERMAL_THROTTLE_MAX;
+
+       return 0;
+}
+
+static int
+mt7915_thermal_get_cur_throttle_state(struct thermal_cooling_device *cdev,
+                                     unsigned long *state)
+{
+       struct mt7915_phy *phy = cdev->devdata;
+
+       *state = phy->throttle_state;
+
+       return 0;
+}
+
+static int
+mt7915_thermal_set_cur_throttle_state(struct thermal_cooling_device *cdev,
+                                     unsigned long state)
+{
+       struct mt7915_phy *phy = cdev->devdata;
+       int ret;
+
+       if (state > MT7915_THERMAL_THROTTLE_MAX)
+               return -EINVAL;
+
+       if (state == phy->throttle_state)
+               return 0;
+
+       ret = mt7915_mcu_set_thermal_throttling(phy, state);
+       if (ret)
+               return ret;
+
+       phy->throttle_state = state;
+
+       return 0;
+}
+
+static const struct thermal_cooling_device_ops mt7915_thermal_ops = {
+       .get_max_state = mt7915_thermal_get_max_throttle_state,
+       .get_cur_state = mt7915_thermal_get_cur_throttle_state,
+       .set_cur_state = mt7915_thermal_set_cur_throttle_state,
+};
+
+static void mt7915_unregister_thermal(struct mt7915_phy *phy)
+{
+       struct wiphy *wiphy = phy->mt76->hw->wiphy;
+
+       if (!phy->cdev)
+               return;
+
+       sysfs_remove_link(&wiphy->dev.kobj, "cooling_device");
+       thermal_cooling_device_unregister(phy->cdev);
+}
+
+static int mt7915_thermal_init(struct mt7915_phy *phy)
+{
+       struct wiphy *wiphy = phy->mt76->hw->wiphy;
+       struct thermal_cooling_device *cdev;
+       struct device *hwmon;
+
+       cdev = thermal_cooling_device_register(wiphy_name(wiphy), phy,
+                                              &mt7915_thermal_ops);
+       if (!IS_ERR(cdev)) {
+               if (sysfs_create_link(&wiphy->dev.kobj, &cdev->device.kobj,
+                                     "cooling_device") < 0)
+                       thermal_cooling_device_unregister(cdev);
+               else
+                       phy->cdev = cdev;
+       }
+
+       if (!IS_REACHABLE(CONFIG_HWMON))
+               return 0;
+
+       hwmon = devm_hwmon_device_register_with_groups(&wiphy->dev,
+                                                      wiphy_name(wiphy), phy,
+                                                      mt7915_hwmon_groups);
+       if (IS_ERR(hwmon))
+               return PTR_ERR(hwmon);
+
+       return 0;
+}
+
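+
+A note on the hunk above: mt7915_thermal_init() replaces the old debugfs
+temperature file with a standard hwmon sensor, so the reading is now
+published as temp1_input in millidegrees Celsius. A small userspace sketch
+of consuming it (the hwmon index is illustrative and varies per system):
+
+    #include <stdio.h>
+
+    int main(void)
+    {
+        /* Illustrative path; find the right index via the hwmon name file. */
+        FILE *f = fopen("/sys/class/hwmon/hwmon0/temp1_input", "r");
+        long mdeg;
+
+        if (!f) {
+            perror("temp1_input");
+            return 1;
+        }
+        if (fscanf(f, "%ld", &mdeg) != 1) {
+            fclose(f);
+            return 1;
+        }
+        fclose(f);
+        /* hwmon publishes millidegrees; the driver multiplied the MCU
+         * reading (whole degrees) by 1000.
+         */
+        printf("%.1f C\n", mdeg / 1000.0);
+        return 0;
+    }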
 static void
 mt7915_init_txpower(struct mt7915_dev *dev,
                    struct ieee80211_supported_band *sband)
@@ -201,7 +287,6 @@ mt7915_mac_init_band(struct mt7915_dev *dev, u8 band)
              FIELD_PREP(MT_MDP_RCFR1_RX_DROPPED_MCAST, MT_MDP_TO_HIF);
        mt76_rmw(dev, MT_MDP_BNRCFR1(band), mask, set);
 
-       mt76_set(dev, MT_WF_RMAC_MIB_TIME0(band), MT_WF_RMAC_MIB_RXTIME_EN);
        mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0(band), MT_WF_RMAC_MIB_RXTIME_EN);
 
        mt76_rmw_field(dev, MT_DMA_DCR0(band), MT_DMA_DCR0_MAX_RX_LEN, 1536);
@@ -228,20 +313,19 @@ static int mt7915_txbf_init(struct mt7915_dev *dev)
 {
        int ret;
 
-
        if (dev->dbdc_support) {
-               ret = mt7915_mcu_set_txbf_module(dev);
+               ret = mt7915_mcu_set_txbf(dev, MT_BF_MODULE_UPDATE);
                if (ret)
                        return ret;
        }
 
        /* trigger sounding packets */
-       ret = mt7915_mcu_set_txbf_sounding(dev);
+       ret = mt7915_mcu_set_txbf(dev, MT_BF_SOUNDING_ON);
        if (ret)
                return ret;
 
        /* enable eBF */
-       return mt7915_mcu_set_txbf_type(dev);
+       return mt7915_mcu_set_txbf(dev, MT_BF_TYPE_UPDATE);
 }
 
 static int mt7915_register_ext_phy(struct mt7915_dev *dev)
@@ -281,8 +365,12 @@ static int mt7915_register_ext_phy(struct mt7915_dev *dev)
        if (ret)
                goto error;
 
-       ret = mt76_register_phy(mphy, true, mt7915_rates,
-                               ARRAY_SIZE(mt7915_rates));
+       ret = mt76_register_phy(mphy, true, mt76_rates,
+                               ARRAY_SIZE(mt76_rates));
+       if (ret)
+               goto error;
+
+       ret = mt7915_thermal_init(phy);
        if (ret)
                goto error;
 
@@ -480,6 +568,9 @@ mt7915_set_stream_he_txbf_caps(struct ieee80211_sta_he_cap *he_cap,
        if (nss < 2)
                return;
 
+       /* the maximum cap is 4 x 3, (Nr, Nc) = (3, 2) */
+       elem->phy_cap_info[7] |= min_t(int, nss - 1, 2) << 3;
+
        if (vif != NL80211_IFTYPE_AP)
                return;
 
@@ -493,9 +584,6 @@ mt7915_set_stream_he_txbf_caps(struct ieee80211_sta_he_cap *he_cap,
        c = IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMING_FB |
            IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMING_PARTIAL_BW_FB;
        elem->phy_cap_info[6] |= c;
-
-       /* the maximum cap is 4 x 3, (Nr, Nc) = (3, 2) */
-       elem->phy_cap_info[7] |= min_t(int, nss - 1, 2) << 3;
 }
 
 static void
@@ -579,8 +667,6 @@ mt7915_init_he_caps(struct mt7915_phy *phy, enum nl80211_band band,
 
                switch (i) {
                case NL80211_IFTYPE_AP:
-                       he_cap_elem->mac_cap_info[0] |=
-                               IEEE80211_HE_MAC_CAP0_TWT_RES;
                        he_cap_elem->mac_cap_info[2] |=
                                IEEE80211_HE_MAC_CAP2_BSR;
                        he_cap_elem->mac_cap_info[4] |=
@@ -594,8 +680,6 @@ mt7915_init_he_caps(struct mt7915_phy *phy, enum nl80211_band band,
                                IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT;
                        break;
                case NL80211_IFTYPE_STATION:
-                       he_cap_elem->mac_cap_info[0] |=
-                               IEEE80211_HE_MAC_CAP0_TWT_REQ;
                        he_cap_elem->mac_cap_info[1] |=
                                IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US;
 
@@ -690,6 +774,7 @@ static void mt7915_unregister_ext_phy(struct mt7915_dev *dev)
        if (!phy)
                return;
 
+       mt7915_unregister_thermal(phy);
        mt76_unregister_phy(mphy);
        ieee80211_free_hw(mphy->hw);
 }
@@ -731,8 +816,12 @@ int mt7915_register_device(struct mt7915_dev *dev)
        dev->mt76.test_ops = &mt7915_testmode_ops;
 #endif
 
-       ret = mt76_register_device(&dev->mt76, true, mt7915_rates,
-                                  ARRAY_SIZE(mt7915_rates));
+       ret = mt76_register_device(&dev->mt76, true, mt76_rates,
+                                  ARRAY_SIZE(mt76_rates));
+       if (ret)
+               return ret;
+
+       ret = mt7915_thermal_init(&dev->phy);
        if (ret)
                return ret;
 
@@ -748,10 +837,12 @@ int mt7915_register_device(struct mt7915_dev *dev)
 void mt7915_unregister_device(struct mt7915_dev *dev)
 {
        mt7915_unregister_ext_phy(dev);
+       mt7915_unregister_thermal(&dev->phy);
        mt76_unregister_device(&dev->mt76);
        mt7915_mcu_exit(dev);
        mt7915_tx_token_put(dev);
        mt7915_dma_cleanup(dev);
+       tasklet_disable(&dev->irq_tasklet);
 
        mt76_free_device(&dev->mt76);
 }
index 7a9759f..2462704 100644 (file)
@@ -307,7 +307,8 @@ mt7915_mac_decode_he_radiotap(struct sk_buff *skb,
        }
 }
 
-int mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
+static int
+mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
 {
        struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
        struct mt76_phy *mphy = &dev->mt76.phy;
@@ -412,14 +413,27 @@ int mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
                u8 *data = (u8 *)rxd;
 
                if (status->flag & RX_FLAG_DECRYPTED) {
-                       status->iv[0] = data[5];
-                       status->iv[1] = data[4];
-                       status->iv[2] = data[3];
-                       status->iv[3] = data[2];
-                       status->iv[4] = data[1];
-                       status->iv[5] = data[0];
-
-                       insert_ccmp_hdr = FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
+                       switch (FIELD_GET(MT_RXD1_NORMAL_SEC_MODE, rxd1)) {
+                       case MT_CIPHER_AES_CCMP:
+                       case MT_CIPHER_CCMP_CCX:
+                       case MT_CIPHER_CCMP_256:
+                               insert_ccmp_hdr =
+                                       FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
+                               fallthrough;
+                       case MT_CIPHER_TKIP:
+                       case MT_CIPHER_TKIP_NO_MIC:
+                       case MT_CIPHER_GCMP:
+                       case MT_CIPHER_GCMP_256:
+                               status->iv[0] = data[5];
+                               status->iv[1] = data[4];
+                               status->iv[2] = data[3];
+                               status->iv[3] = data[2];
+                               status->iv[4] = data[1];
+                               status->iv[5] = data[0];
+                               break;
+                       default:
+                               break;
+                       }
                }
                rxd += 4;
                if ((u8 *)rxd - skb->data >= skb->len)
@@ -610,9 +624,10 @@ int mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
        return 0;
 }
 
-#ifdef CONFIG_NL80211_TESTMODE
-void mt7915_mac_fill_rx_vector(struct mt7915_dev *dev, struct sk_buff *skb)
+static void
+mt7915_mac_fill_rx_vector(struct mt7915_dev *dev, struct sk_buff *skb)
 {
+#ifdef CONFIG_NL80211_TESTMODE
        struct mt7915_phy *phy = &dev->phy;
        __le32 *rxd = (__le32 *)skb->data;
        __le32 *rxv_hdr = rxd + 2;
@@ -650,10 +665,10 @@ void mt7915_mac_fill_rx_vector(struct mt7915_dev *dev, struct sk_buff *skb)
 
        phy->test.last_freq_offset = foe;
        phy->test.last_snr = snr;
+#endif
 
        dev_kfree_skb(skb);
 }
-#endif
 
 static void
 mt7915_mac_write_txwi_tm(struct mt7915_phy *phy, __le32 *txwi,
@@ -885,7 +900,7 @@ mt7915_mac_write_txwi_80211(struct mt7915_dev *dev, __le32 *txwi,
 }
 
 void mt7915_mac_write_txwi(struct mt7915_dev *dev, __le32 *txwi,
-                          struct sk_buff *skb, struct mt76_wcid *wcid,
+                          struct sk_buff *skb, struct mt76_wcid *wcid, int pid,
                           struct ieee80211_key_conf *key, bool beacon)
 {
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -944,7 +959,12 @@ void mt7915_mac_write_txwi(struct mt7915_dev *dev, __le32 *txwi,
 
        txwi[3] = cpu_to_le32(val);
        txwi[4] = 0;
-       txwi[5] = 0;
+
+       val = FIELD_PREP(MT_TXD5_PID, pid);
+       if (pid >= MT_PACKET_ID_FIRST)
+               val |= MT_TXD5_TX_STATUS_HOST;
+       txwi[5] = cpu_to_le32(val);
+
        txwi[6] = 0;
        txwi[7] = wcid->amsdu ? cpu_to_le32(MT_TXD7_HW_AMSDU) : 0;
 
@@ -984,11 +1004,11 @@ int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
        struct ieee80211_key_conf *key = info->control.hw_key;
        struct ieee80211_vif *vif = info->control.vif;
-       struct mt76_tx_cb *cb = mt76_tx_skb_cb(tx_info->skb);
        struct mt76_txwi_cache *t;
        struct mt7915_txp *txp;
        int id, i, nbuf = tx_info->nbuf - 1;
        u8 *txwi = (u8 *)txwi_ptr;
+       int pid;
 
        if (unlikely(tx_info->skb->len <= ETH_HLEN))
                return -EINVAL;
@@ -996,10 +1016,10 @@ int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
        if (!wcid)
                wcid = &dev->mt76.global_wcid;
 
-       mt7915_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, key,
-                             false);
+       pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
 
-       cb->wcid = wcid->idx;
+       mt7915_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, pid, key,
+                             false);
 
        txp = (struct mt7915_txp *)(txwi + MT_TXD_SIZE);
        for (i = 0; i < nbuf; i++) {
@@ -1071,54 +1091,7 @@ mt7915_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi)
 }
 
 static void
-mt7915_tx_complete_status(struct mt76_dev *mdev, struct sk_buff *skb,
-                         struct ieee80211_sta *sta, u8 stat,
-                         struct list_head *free_list)
-{
-       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-       struct ieee80211_tx_status status = {
-               .sta = sta,
-               .info = info,
-               .skb = skb,
-               .free_list = free_list,
-       };
-       struct ieee80211_hw *hw;
-
-       if (sta) {
-               struct mt7915_sta *msta;
-
-               msta = (struct mt7915_sta *)sta->drv_priv;
-               status.rate = &msta->stats.tx_rate;
-       }
-
-#ifdef CONFIG_NL80211_TESTMODE
-       if (mt76_is_testmode_skb(mdev, skb, &hw)) {
-               struct mt7915_phy *phy = mt7915_hw_phy(hw);
-               struct ieee80211_vif *vif = phy->monitor_vif;
-               struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
-
-               mt76_tx_complete_skb(mdev, mvif->sta.wcid.idx, skb);
-               return;
-       }
-#endif
-
-       hw = mt76_tx_status_get_hw(mdev, skb);
-
-       if (info->flags & IEEE80211_TX_CTL_AMPDU)
-               info->flags |= IEEE80211_TX_STAT_AMPDU;
-
-       if (stat)
-               ieee80211_tx_info_clear_status(info);
-
-       if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
-               info->flags |= IEEE80211_TX_STAT_ACK;
-
-       info->status.tx_time = 0;
-       ieee80211_tx_status_ext(hw, &status);
-}
-
-void mt7915_txp_skb_unmap(struct mt76_dev *dev,
-                         struct mt76_txwi_cache *t)
+mt7915_txp_skb_unmap(struct mt76_dev *dev, struct mt76_txwi_cache *t)
 {
        struct mt7915_txp *txp;
        int i;
@@ -1129,7 +1102,39 @@ void mt7915_txp_skb_unmap(struct mt76_dev *dev,
                                 le16_to_cpu(txp->len[i]), DMA_TO_DEVICE);
 }
 
-void mt7915_mac_tx_free(struct mt7915_dev *dev, struct sk_buff *skb)
+static void
+mt7915_txwi_free(struct mt7915_dev *dev, struct mt76_txwi_cache *t,
+                struct ieee80211_sta *sta, struct list_head *free_list)
+{
+       struct mt76_dev *mdev = &dev->mt76;
+       struct mt76_wcid *wcid;
+       __le32 *txwi;
+       u16 wcid_idx;
+
+       mt7915_txp_skb_unmap(mdev, t);
+       if (!t->skb)
+               goto out;
+
+       txwi = (__le32 *)mt76_get_txwi_ptr(mdev, t);
+       if (sta) {
+               wcid = (struct mt76_wcid *)sta->drv_priv;
+               wcid_idx = wcid->idx;
+
+               if (likely(t->skb->protocol != cpu_to_be16(ETH_P_PAE)))
+                       mt7915_tx_check_aggr(sta, txwi);
+       } else {
+               wcid_idx = FIELD_GET(MT_TXD1_WLAN_IDX, le32_to_cpu(txwi[1]));
+       }
+
+       __mt76_tx_complete_skb(mdev, wcid_idx, t->skb, free_list);
+
+out:
+       t->skb = NULL;
+       mt76_put_txwi(mdev, t);
+}
+
+static void
+mt7915_mac_tx_free(struct mt7915_dev *dev, struct sk_buff *skb)
 {
        struct mt7915_tx_free *free = (struct mt7915_tx_free *)skb->data;
        struct mt76_dev *mdev = &dev->mt76;
@@ -1194,28 +1199,7 @@ void mt7915_mac_tx_free(struct mt7915_dev *dev, struct sk_buff *skb)
                if (!txwi)
                        continue;
 
-               mt7915_txp_skb_unmap(mdev, txwi);
-               if (txwi->skb) {
-                       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(txwi->skb);
-                       void *txwi_ptr = mt76_get_txwi_ptr(mdev, txwi);
-
-                       if (likely(txwi->skb->protocol != cpu_to_be16(ETH_P_PAE)))
-                               mt7915_tx_check_aggr(sta, txwi_ptr);
-
-                       if (sta && !info->tx_time_est) {
-                               struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
-                               int pending;
-
-                               pending = atomic_dec_return(&wcid->non_aql_packets);
-                               if (pending < 0)
-                                       atomic_cmpxchg(&wcid->non_aql_packets, pending, 0);
-                       }
-
-                       mt7915_tx_complete_status(mdev, txwi->skb, sta, stat, &free_list);
-                       txwi->skb = NULL;
-               }
-
-               mt76_put_txwi(mdev, txwi);
+               mt7915_txwi_free(dev, txwi, sta, &free_list);
        }
 
        mt7915_mac_sta_poll(dev);
@@ -1233,6 +1217,120 @@ void mt7915_mac_tx_free(struct mt7915_dev *dev, struct sk_buff *skb)
        }
 }
 
+static bool
+mt7915_mac_add_txs_skb(struct mt7915_dev *dev, struct mt76_wcid *wcid, int pid,
+                      __le32 *txs_data)
+{
+       struct mt76_dev *mdev = &dev->mt76;
+       struct ieee80211_tx_info *info;
+       struct sk_buff_head list;
+       struct sk_buff *skb;
+
+       mt76_tx_status_lock(mdev, &list);
+       skb = mt76_tx_status_skb_get(mdev, wcid, pid, &list);
+       if (!skb)
+               goto out;
+
+       info = IEEE80211_SKB_CB(skb);
+       if (!(txs_data[0] & le32_to_cpu(MT_TXS0_ACK_ERROR_MASK)))
+               info->flags |= IEEE80211_TX_STAT_ACK;
+
+       info->status.ampdu_len = 1;
+       info->status.ampdu_ack_len = !!(info->flags &
+                                       IEEE80211_TX_STAT_ACK);
+
+       info->status.rates[0].idx = -1;
+       mt76_tx_status_skb_done(mdev, skb, &list);
+
+out:
+       mt76_tx_status_unlock(mdev, &list);
+
+       return !!skb;
+}
+
+static void mt7915_mac_add_txs(struct mt7915_dev *dev, void *data)
+{
+       struct mt7915_sta *msta = NULL;
+       struct mt76_wcid *wcid;
+       __le32 *txs_data = data;
+       u16 wcidx;
+       u32 txs;
+       u8 pid;
+
+       txs = le32_to_cpu(txs_data[0]);
+       if (FIELD_GET(MT_TXS0_TXS_FORMAT, txs) > 1)
+               return;
+
+       txs = le32_to_cpu(txs_data[2]);
+       wcidx = FIELD_GET(MT_TXS2_WCID, txs);
+
+       txs = le32_to_cpu(txs_data[3]);
+       pid = FIELD_GET(MT_TXS3_PID, txs);
+
+       if (pid < MT_PACKET_ID_FIRST)
+               return;
+
+       if (wcidx >= MT7915_WTBL_SIZE)
+               return;
+
+       rcu_read_lock();
+
+       wcid = rcu_dereference(dev->mt76.wcid[wcidx]);
+       if (!wcid)
+               goto out;
+
+       mt7915_mac_add_txs_skb(dev, wcid, pid, txs_data);
+
+       if (!wcid->sta)
+               goto out;
+
+       msta = container_of(wcid, struct mt7915_sta, wcid);
+       spin_lock_bh(&dev->sta_poll_lock);
+       if (list_empty(&msta->poll_list))
+               list_add_tail(&msta->poll_list, &dev->sta_poll_list);
+       spin_unlock_bh(&dev->sta_poll_lock);
+
+out:
+       rcu_read_unlock();
+}
+
+void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
+                        struct sk_buff *skb)
+{
+       struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
+       __le32 *rxd = (__le32 *)skb->data;
+       __le32 *end = (__le32 *)&skb->data[skb->len];
+       enum rx_pkt_type type;
+
+       type = FIELD_GET(MT_RXD0_PKT_TYPE, le32_to_cpu(rxd[0]));
+
+       switch (type) {
+       case PKT_TYPE_TXRX_NOTIFY:
+               mt7915_mac_tx_free(dev, skb);
+               break;
+       case PKT_TYPE_RX_EVENT:
+               mt7915_mcu_rx_event(dev, skb);
+               break;
+       case PKT_TYPE_TXRXV:
+               mt7915_mac_fill_rx_vector(dev, skb);
+               break;
+       case PKT_TYPE_TXS:
+               for (rxd += 2; rxd + 8 <= end; rxd += 8)
+                       mt7915_mac_add_txs(dev, rxd);
+               dev_kfree_skb(skb);
+               break;
+       case PKT_TYPE_NORMAL:
+               if (!mt7915_mac_fill_rx(dev, skb)) {
+                       mt76_rx(&dev->mt76, q, skb);
+                       return;
+               }
+               fallthrough;
+       default:
+               dev_kfree_skb(skb);
+               break;
+       }
+}
+
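+
+In the new PKT_TYPE_TXS case above, the handler skips a 2-dword header and
+then consumes fixed 8-dword status records until the buffer runs out. A
+standalone sketch of that record walk (record contents elided; sizes as in
+the hunk):
+
+    #include <stdint.h>
+    #include <stdio.h>
+
+    int main(void)
+    {
+        /* A TXS frame: 2-dword header followed by 8-dword records. */
+        uint32_t data[2 + 8 * 3] = { 0 };   /* header + 3 records */
+        uint32_t *rxd = data;
+        uint32_t *end = data + sizeof(data) / sizeof(data[0]);
+        int n = 0;
+
+        for (rxd += 2; rxd + 8 <= end; rxd += 8)
+            n++;    /* each iteration would parse one TXS record */
+
+        printf("parsed %d records\n", n);
+        return 0;
+    }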
 void mt7915_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e)
 {
        struct mt7915_dev *dev;
@@ -1254,15 +1352,8 @@ void mt7915_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e)
                e->skb = t ? t->skb : NULL;
        }
 
-       if (e->skb) {
-               struct mt76_tx_cb *cb = mt76_tx_skb_cb(e->skb);
-               struct mt76_wcid *wcid;
-
-               wcid = rcu_dereference(dev->mt76.wcid[cb->wcid]);
-
-               mt7915_tx_complete_status(mdev, e->skb, wcid_to_sta(wcid), 0,
-                                         NULL);
-       }
+       if (e->skb)
+               mt76_tx_complete_skb(mdev, e->wcid, e->skb);
 }
 
 void mt7915_mac_cca_stats_reset(struct mt7915_phy *phy)
@@ -1296,14 +1387,10 @@ void mt7915_mac_reset_counters(struct mt7915_phy *phy)
        memset(&dev->mt76.aggr_stats[i], 0, sizeof(dev->mt76.aggr_stats) / 2);
 
        /* reset airtime counters */
-       mt76_rr(dev, MT_MIB_SDR9(ext_phy));
-       mt76_rr(dev, MT_MIB_SDR36(ext_phy));
-       mt76_rr(dev, MT_MIB_SDR37(ext_phy));
-
-       mt76_set(dev, MT_WF_RMAC_MIB_TIME0(ext_phy),
-                MT_WF_RMAC_MIB_RXTIME_CLR);
        mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0(ext_phy),
                 MT_WF_RMAC_MIB_RXTIME_CLR);
+
+       mt7915_mcu_get_chan_mib_info(phy, true);
 }
 
 void mt7915_mac_set_timing(struct mt7915_phy *phy)
@@ -1397,53 +1484,24 @@ mt7915_phy_get_nf(struct mt7915_phy *phy, int idx)
        return sum / n;
 }
 
-static void
-mt7915_phy_update_channel(struct mt76_phy *mphy, int idx)
+void mt7915_update_channel(struct mt76_phy *mphy)
 {
-       struct mt7915_dev *dev = container_of(mphy->dev, struct mt7915_dev, mt76);
        struct mt7915_phy *phy = (struct mt7915_phy *)mphy->priv;
-       struct mt76_channel_state *state;
-       u64 busy_time, tx_time, rx_time, obss_time;
+       struct mt76_channel_state *state = mphy->chan_state;
+       bool ext_phy = phy != &phy->dev->phy;
        int nf;
 
-       busy_time = mt76_get_field(dev, MT_MIB_SDR9(idx),
-                                  MT_MIB_SDR9_BUSY_MASK);
-       tx_time = mt76_get_field(dev, MT_MIB_SDR36(idx),
-                                MT_MIB_SDR36_TXTIME_MASK);
-       rx_time = mt76_get_field(dev, MT_MIB_SDR37(idx),
-                                MT_MIB_SDR37_RXTIME_MASK);
-       obss_time = mt76_get_field(dev, MT_WF_RMAC_MIB_AIRTIME14(idx),
-                                  MT_MIB_OBSSTIME_MASK);
+       mt7915_mcu_get_chan_mib_info(phy, false);
 
-       nf = mt7915_phy_get_nf(phy, idx);
+       nf = mt7915_phy_get_nf(phy, ext_phy);
        if (!phy->noise)
                phy->noise = nf << 4;
        else if (nf)
                phy->noise += nf - (phy->noise >> 4);
 
-       state = mphy->chan_state;
-       state->cc_busy += busy_time;
-       state->cc_tx += tx_time;
-       state->cc_rx += rx_time + obss_time;
-       state->cc_bss_rx += rx_time;
        state->noise = -(phy->noise >> 4);
 }
 
-void mt7915_update_channel(struct mt76_dev *mdev)
-{
-       struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
-
-       mt7915_phy_update_channel(&mdev->phy, 0);
-       if (mdev->phy2)
-               mt7915_phy_update_channel(mdev->phy2, 1);
-
-       /* reset obss airtime */
-       mt76_set(dev, MT_WF_RMAC_MIB_TIME0(0), MT_WF_RMAC_MIB_RXTIME_CLR);
-       if (mdev->phy2)
-               mt76_set(dev, MT_WF_RMAC_MIB_TIME0(1),
-                        MT_WF_RMAC_MIB_RXTIME_CLR);
-}
-
 static bool
 mt7915_wait_reset_state(struct mt7915_dev *dev, u32 state)
 {
@@ -1530,14 +1588,18 @@ mt7915_dma_reset(struct mt7915_dev *dev)
        mt76_set(dev, MT_WFDMA0_GLO_CFG,
                 MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN);
        mt76_set(dev, MT_WFDMA1_GLO_CFG,
-                MT_WFDMA1_GLO_CFG_TX_DMA_EN | MT_WFDMA1_GLO_CFG_RX_DMA_EN);
+                MT_WFDMA1_GLO_CFG_TX_DMA_EN | MT_WFDMA1_GLO_CFG_RX_DMA_EN |
+                MT_WFDMA1_GLO_CFG_OMIT_TX_INFO |
+                MT_WFDMA1_GLO_CFG_OMIT_RX_INFO);
        if (dev->hif2) {
                mt76_set(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
                        (MT_WFDMA0_GLO_CFG_TX_DMA_EN |
                         MT_WFDMA0_GLO_CFG_RX_DMA_EN));
                mt76_set(dev, MT_WFDMA1_GLO_CFG + hif1_ofs,
                        (MT_WFDMA1_GLO_CFG_TX_DMA_EN |
-                        MT_WFDMA1_GLO_CFG_RX_DMA_EN));
+                        MT_WFDMA1_GLO_CFG_RX_DMA_EN |
+                        MT_WFDMA1_GLO_CFG_OMIT_TX_INFO |
+                        MT_WFDMA1_GLO_CFG_OMIT_RX_INFO));
        }
 }
 
@@ -1548,14 +1610,7 @@ void mt7915_tx_token_put(struct mt7915_dev *dev)
 
        spin_lock_bh(&dev->mt76.token_lock);
        idr_for_each_entry(&dev->mt76.token, txwi, id) {
-               mt7915_txp_skb_unmap(&dev->mt76, txwi);
-               if (txwi->skb) {
-                       struct ieee80211_hw *hw;
-
-                       hw = mt76_tx_status_get_hw(&dev->mt76, txwi->skb);
-                       ieee80211_free_txskb(hw, txwi->skb);
-               }
-               mt76_put_txwi(&dev->mt76, txwi);
+               mt7915_txwi_free(dev, txwi, NULL, NULL);
                dev->mt76.token_count--;
        }
        spin_unlock_bh(&dev->mt76.token_lock);
@@ -1588,11 +1643,6 @@ void mt7915_mac_reset_work(struct work_struct *work)
                set_bit(MT76_RESET, &phy2->mt76->state);
                cancel_delayed_work_sync(&phy2->mt76->mac_work);
        }
-       /* lock/unlock all queues to ensure that no tx is pending */
-       mt76_txq_schedule_all(&dev->mphy);
-       if (ext_phy)
-               mt76_txq_schedule_all(ext_phy);
-
        mt76_worker_disable(&dev->mt76.tx_worker);
        napi_disable(&dev->mt76.napi[0]);
        napi_disable(&dev->mt76.napi[1]);
@@ -1618,10 +1668,6 @@ void mt7915_mac_reset_work(struct work_struct *work)
        if (phy2)
                clear_bit(MT76_RESET, &phy2->mt76->state);
 
-       mt76_worker_enable(&dev->mt76.tx_worker);
-       napi_enable(&dev->mt76.tx_napi);
-       napi_schedule(&dev->mt76.tx_napi);
-
        napi_enable(&dev->mt76.napi[0]);
        napi_schedule(&dev->mt76.napi[0]);
 
@@ -1630,14 +1676,20 @@ void mt7915_mac_reset_work(struct work_struct *work)
 
        napi_enable(&dev->mt76.napi[2]);
        napi_schedule(&dev->mt76.napi[2]);
+       tasklet_schedule(&dev->irq_tasklet);
+
+       mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_RESET_DONE);
+       mt7915_wait_reset_state(dev, MT_MCU_CMD_NORMAL_STATE);
+
+       mt76_worker_enable(&dev->mt76.tx_worker);
+
+       napi_enable(&dev->mt76.tx_napi);
+       napi_schedule(&dev->mt76.tx_napi);
 
        ieee80211_wake_queues(mt76_hw(dev));
        if (ext_phy)
                ieee80211_wake_queues(ext_phy->hw);
 
-       mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_RESET_DONE);
-       mt7915_wait_reset_state(dev, MT_MCU_CMD_NORMAL_STATE);
-
        mutex_unlock(&dev->mt76.mutex);
 
        mt7915_update_beacons(dev);
@@ -1651,7 +1703,7 @@ void mt7915_mac_reset_work(struct work_struct *work)
 }
 
 static void
-mt7915_mac_update_mib_stats(struct mt7915_phy *phy)
+mt7915_mac_update_stats(struct mt7915_phy *phy)
 {
        struct mt7915_dev *dev = phy->dev;
        struct mib_stats *mib = &phy->mib;
@@ -1733,8 +1785,10 @@ void mt7915_mac_sta_rc_work(struct work_struct *work)
 
                if (changed & (IEEE80211_RC_SUPP_RATES_CHANGED |
                               IEEE80211_RC_NSS_CHANGED |
-                              IEEE80211_RC_BW_CHANGED))
+                              IEEE80211_RC_BW_CHANGED)) {
+                       mt7915_mcu_add_he(dev, vif, sta);
                        mt7915_mcu_add_rate_ctrl(dev, vif, sta);
+               }
 
                if (changed & IEEE80211_RC_SMPS_CHANGED)
                        mt7915_mcu_add_smps(dev, vif, sta);
@@ -1756,11 +1810,11 @@ void mt7915_mac_work(struct work_struct *work)
 
        mutex_lock(&mphy->dev->mutex);
 
-       mt76_update_survey(mphy->dev);
+       mt76_update_survey(mphy);
        if (++mphy->mac_work_count == 5) {
                mphy->mac_work_count = 0;
 
-               mt7915_mac_update_mib_stats(phy);
+               mt7915_mac_update_stats(phy);
        }
 
        if (++phy->sta_work_count == 10) {
@@ -1770,6 +1824,8 @@ void mt7915_mac_work(struct work_struct *work)
 
        mutex_unlock(&mphy->dev->mutex);
 
+       mt76_tx_status_check(mphy->dev, NULL, false);
+
        ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work,
                                     MT7915_WATCHDOG_TIME);
 }
index 0f929fb..eb1885f 100644 (file)
@@ -304,6 +304,62 @@ struct mt7915_tx_free {
 /* will support this field in further revision */
 #define MT_TX_FREE_RATE                        GENMASK(13, 0)
 
+#define MT_TXS0_FIXED_RATE             BIT(31)
+#define MT_TXS0_BW                     GENMASK(30, 29)
+#define MT_TXS0_TID                    GENMASK(28, 26)
+#define MT_TXS0_AMPDU                  BIT(25)
+#define MT_TXS0_TXS_FORMAT             GENMASK(24, 23)
+#define MT_TXS0_BA_ERROR               BIT(22)
+#define MT_TXS0_PS_FLAG                        BIT(21)
+#define MT_TXS0_TXOP_TIMEOUT           BIT(20)
+#define MT_TXS0_BIP_ERROR              BIT(19)
+
+#define MT_TXS0_QUEUE_TIMEOUT          BIT(18)
+#define MT_TXS0_RTS_TIMEOUT            BIT(17)
+#define MT_TXS0_ACK_TIMEOUT            BIT(16)
+#define MT_TXS0_ACK_ERROR_MASK         GENMASK(18, 16)
+
+#define MT_TXS0_TX_STATUS_HOST         BIT(15)
+#define MT_TXS0_TX_STATUS_MCU          BIT(14)
+#define MT_TXS0_TX_RATE                        GENMASK(13, 0)
+
+#define MT_TXS1_SEQNO                  GENMASK(31, 20)
+#define MT_TXS1_RESP_RATE              GENMASK(19, 16)
+#define MT_TXS1_RXV_SEQNO              GENMASK(15, 8)
+#define MT_TXS1_TX_POWER_DBM           GENMASK(7, 0)
+
+#define MT_TXS2_BF_STATUS              GENMASK(31, 30)
+#define MT_TXS2_LAST_TX_RATE           GENMASK(29, 27)
+#define MT_TXS2_SHARED_ANTENNA         BIT(26)
+#define MT_TXS2_WCID                   GENMASK(25, 16)
+#define MT_TXS2_TX_DELAY               GENMASK(15, 0)
+
+#define MT_TXS3_PID                    GENMASK(31, 24)
+#define MT_TXS3_ANT_ID                 GENMASK(23, 0)
+
+#define MT_TXS4_TIMESTAMP              GENMASK(31, 0)
+
+#define MT_TXS5_F0_FINAL_MPDU          BIT(31)
+#define MT_TXS5_F0_QOS                 BIT(30)
+#define MT_TXS5_F0_TX_COUNT            GENMASK(29, 25)
+#define MT_TXS5_F0_FRONT_TIME          GENMASK(24, 0)
+#define MT_TXS5_F1_MPDU_TX_COUNT       GENMASK(31, 24)
+#define MT_TXS5_F1_MPDU_TX_BYTES       GENMASK(23, 0)
+
+#define MT_TXS6_F0_NOISE_3             GENMASK(31, 24)
+#define MT_TXS6_F0_NOISE_2             GENMASK(23, 16)
+#define MT_TXS6_F0_NOISE_1             GENMASK(15, 8)
+#define MT_TXS6_F0_NOISE_0             GENMASK(7, 0)
+#define MT_TXS6_F1_MPDU_FAIL_COUNT     GENMASK(31, 24)
+#define MT_TXS6_F1_MPDU_FAIL_BYTES     GENMASK(23, 0)
+
+#define MT_TXS7_F0_RCPI_3              GENMASK(31, 24)
+#define MT_TXS7_F0_RCPI_2              GENMASK(23, 16)
+#define MT_TXS7_F0_RCPI_1              GENMASK(15, 8)
+#define MT_TXS7_F0_RCPI_0              GENMASK(7, 0)
+#define MT_TXS7_F1_MPDU_RETRY_COUNT    GENMASK(31, 24)
+#define MT_TXS7_F1_MPDU_RETRY_BYTES    GENMASK(23, 0)
+
 struct mt7915_dfs_pulse {
        u32 max_width;          /* us */
        int max_pwr;            /* dbm */
index e5bd687..c25f8da 100644 (file)
@@ -139,12 +139,6 @@ static int get_omac_idx(enum nl80211_iftype type, u64 mask)
                if (type != NL80211_IFTYPE_STATION)
                        break;
 
-               /* next, try to find a free repeater entry for the sta */
-               i = get_free_idx(mask >> REPEATER_BSSID_START, 0,
-                                REPEATER_BSSID_MAX - REPEATER_BSSID_START);
-               if (i)
-                       return i + 32 - 1;
-
                i = get_free_idx(mask, EXT_BSSID_1, EXT_BSSID_MAX);
                if (i)
                        return i - 1;
@@ -172,6 +166,22 @@ static int get_omac_idx(enum nl80211_iftype type, u64 mask)
        return -1;
 }
 
+static void mt7915_init_bitrate_mask(struct ieee80211_vif *vif)
+{
+       struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(mvif->bitrate_mask.control); i++) {
+               mvif->bitrate_mask.control[i].legacy = GENMASK(31, 0);
+               memset(mvif->bitrate_mask.control[i].ht_mcs, GENMASK(7, 0),
+                      sizeof(mvif->bitrate_mask.control[i].ht_mcs));
+               memset(mvif->bitrate_mask.control[i].vht_mcs, GENMASK(15, 0),
+                      sizeof(mvif->bitrate_mask.control[i].vht_mcs));
+               memset(mvif->bitrate_mask.control[i].he_mcs, GENMASK(15, 0),
+                      sizeof(mvif->bitrate_mask.control[i].he_mcs));
+       }
+}
+
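+
+mt7915_init_bitrate_mask() above fills u16 MCS arrays via memset(), which
+only works because memset() truncates its value to a single byte:
+GENMASK(15, 0) therefore still yields 0xffff elements. A standalone sketch
+of why the idiom holds (and when it would not):
+
+    #include <stdint.h>
+    #include <stdio.h>
+    #include <string.h>
+
+    int main(void)
+    {
+        uint16_t mcs[4];
+
+        /* memset() truncates its value argument to one byte, so
+         * GENMASK(15, 0) == 0xffff becomes the fill byte 0xff and every
+         * u16 element still ends up 0xffff. A value like 0x1234 would
+         * not survive: each byte becomes 0x34, i.e. elements of 0x3434.
+         */
+        memset(mcs, 0xffff, sizeof(mcs));
+        printf("0x%04x\n", mcs[0]);     /* prints 0xffff */
+        return 0;
+    }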
 static int mt7915_add_interface(struct ieee80211_hw *hw,
                                struct ieee80211_vif *vif)
 {
@@ -241,6 +251,8 @@ static int mt7915_add_interface(struct ieee80211_hw *hw,
                vif->offload_flags = 0;
        vif->offload_flags |= IEEE80211_OFFLOAD_ENCAP_4ADDR;
 
+       mt7915_init_bitrate_mask(vif);
+
 out:
        mutex_unlock(&dev->mt76.mutex);
 
@@ -798,7 +810,8 @@ mt7915_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
 
        n = mvif->omac_idx > HW_BSSID_MAX ? HW_BSSID_0 : mvif->omac_idx;
        /* TSF software read */
-       mt76_set(dev, MT_LPON_TCR(band, n), MT_LPON_TCR_SW_MODE);
+       mt76_rmw(dev, MT_LPON_TCR(band, n), MT_LPON_TCR_SW_MODE,
+                MT_LPON_TCR_SW_READ);
        tsf.t32[0] = mt76_rr(dev, MT_LPON_UTTR0(band));
        tsf.t32[1] = mt76_rr(dev, MT_LPON_UTTR1(band));
 
@@ -827,7 +840,34 @@ mt7915_set_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
        mt76_wr(dev, MT_LPON_UTTR0(band), tsf.t32[0]);
        mt76_wr(dev, MT_LPON_UTTR1(band), tsf.t32[1]);
        /* TSF software overwrite */
-       mt76_set(dev, MT_LPON_TCR(band, n), MT_LPON_TCR_SW_WRITE);
+       mt76_rmw(dev, MT_LPON_TCR(band, n), MT_LPON_TCR_SW_MODE,
+                MT_LPON_TCR_SW_WRITE);
+
+       mutex_unlock(&dev->mt76.mutex);
+}
+
+static void
+mt7915_offset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+                 s64 timestamp)
+{
+       struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+       struct mt7915_dev *dev = mt7915_hw_dev(hw);
+       struct mt7915_phy *phy = mt7915_hw_phy(hw);
+       bool band = phy != &dev->phy;
+       union {
+               u64 t64;
+               u32 t32[2];
+       } tsf = { .t64 = timestamp, };
+       u16 n;
+
+       mutex_lock(&dev->mt76.mutex);
+
+       n = mvif->omac_idx > HW_BSSID_MAX ? HW_BSSID_0 : mvif->omac_idx;
+       mt76_wr(dev, MT_LPON_UTTR0(band), tsf.t32[0]);
+       mt76_wr(dev, MT_LPON_UTTR1(band), tsf.t32[1]);
+       /* TSF software adjust */
+       mt76_rmw(dev, MT_LPON_TCR(band, n), MT_LPON_TCR_SW_MODE,
+                MT_LPON_TCR_SW_ADJUST);
 
        mutex_unlock(&dev->mt76.mutex);
 }
@@ -911,17 +951,15 @@ static void mt7915_sta_statistics(struct ieee80211_hw *hw,
        sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
 }
 
-static void
-mt7915_sta_rc_update(struct ieee80211_hw *hw,
-                    struct ieee80211_vif *vif,
-                    struct ieee80211_sta *sta,
-                    u32 changed)
+static void mt7915_sta_rc_work(void *data, struct ieee80211_sta *sta)
 {
-       struct mt7915_dev *dev = mt7915_hw_dev(hw);
        struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
+       struct mt7915_dev *dev = msta->vif->phy->dev;
+       struct ieee80211_hw *hw = msta->vif->phy->mt76->hw;
+       u32 *changed = data;
 
        spin_lock_bh(&dev->sta_poll_lock);
-       msta->stats.changed |= changed;
+       msta->stats.changed |= *changed;
        if (list_empty(&msta->rc_list))
                list_add_tail(&msta->rc_list, &dev->sta_rc_list);
        spin_unlock_bh(&dev->sta_poll_lock);
@@ -929,6 +967,39 @@ mt7915_sta_rc_update(struct ieee80211_hw *hw,
        ieee80211_queue_work(hw, &dev->rc_work);
 }
 
+static void mt7915_sta_rc_update(struct ieee80211_hw *hw,
+                                struct ieee80211_vif *vif,
+                                struct ieee80211_sta *sta,
+                                u32 changed)
+{
+       mt7915_sta_rc_work(&changed, sta);
+}
+
+static int
+mt7915_set_bitrate_mask(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+                       const struct cfg80211_bitrate_mask *mask)
+{
+       struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+       enum nl80211_band band = mvif->phy->mt76->chandef.chan->band;
+       u32 changed;
+
+       if (mask->control[band].gi == NL80211_TXRATE_FORCE_LGI)
+               return -EINVAL;
+
+       changed = IEEE80211_RC_SUPP_RATES_CHANGED;
+       mvif->bitrate_mask = *mask;
+
+       /* Update firmware rate control to add a boundary on top of the
+        * table, limiting the rate selection for each peer. For example,
+        * setting bitrates vht-mcs-5 to 1:9 actually means nss = 1,
+        * mcs = 0~9. This only applies to data frames; mgmt, mcast and
+        * bcast frames still use legacy rates as-is.
+        */
+       ieee80211_iterate_stations_atomic(hw, mt7915_sta_rc_work, &changed);
+
+       return 0;
+}
+
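+
+The boundary described in the comment above amounts to clamping each peer's
+rate table at the highest MCS that is both hardware-supported and
+user-enabled -- fls(mcs & mask) - 1 in the kernel helpers later in this
+diff. A standalone sketch of the clamp (values illustrative):
+
+    #include <stdio.h>
+
+    /* The peer supports MCS 0..N (as a bitmap) and the user mask enables
+     * a subset; firmware is given the highest MCS in the intersection,
+     * which is what fls(mcs & mask) - 1 computes in the kernel.
+     */
+    static int highest_mcs(unsigned int supported, unsigned int mask)
+    {
+        unsigned int m = supported & mask;
+
+        return m ? 31 - __builtin_clz(m) : -1;
+    }
+
+    int main(void)
+    {
+        unsigned int sup = (1u << 10) - 1;  /* peer supports MCS 0-9 */
+        unsigned int mask = (1u << 8) - 1;  /* user enables MCS 0-7  */
+
+        printf("tx capped at MCS %d\n", highest_mcs(sup, mask));
+        return 0;
+    }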
 static void mt7915_sta_set_4addr(struct ieee80211_hw *hw,
                                 struct ieee80211_vif *vif,
                                 struct ieee80211_sta *sta,
@@ -987,9 +1058,11 @@ const struct ieee80211_ops mt7915_ops = {
        .get_stats = mt7915_get_stats,
        .get_tsf = mt7915_get_tsf,
        .set_tsf = mt7915_set_tsf,
+       .offset_tsf = mt7915_offset_tsf,
        .get_survey = mt76_get_survey,
        .get_antenna = mt76_get_antenna,
        .set_antenna = mt7915_set_antenna,
+       .set_bitrate_mask = mt7915_set_bitrate_mask,
        .set_coverage_class = mt7915_set_coverage_class,
        .sta_statistics = mt7915_sta_statistics,
        .sta_set_4addr = mt7915_sta_set_4addr,
index b3f14ff..863aa18 100644 (file)
@@ -88,28 +88,28 @@ struct mt7915_fw_region {
 #define HE_PHY(p, c)                   u8_get_bits(c, IEEE80211_HE_PHY_##p)
 #define HE_MAC(m, c)                   u8_get_bits(c, IEEE80211_HE_MAC_##m)
 
-static enum mt7915_cipher_type
+static enum mcu_cipher_type
 mt7915_mcu_get_cipher(int cipher)
 {
        switch (cipher) {
        case WLAN_CIPHER_SUITE_WEP40:
-               return MT_CIPHER_WEP40;
+               return MCU_CIPHER_WEP40;
        case WLAN_CIPHER_SUITE_WEP104:
-               return MT_CIPHER_WEP104;
+               return MCU_CIPHER_WEP104;
        case WLAN_CIPHER_SUITE_TKIP:
-               return MT_CIPHER_TKIP;
+               return MCU_CIPHER_TKIP;
        case WLAN_CIPHER_SUITE_AES_CMAC:
-               return MT_CIPHER_BIP_CMAC_128;
+               return MCU_CIPHER_BIP_CMAC_128;
        case WLAN_CIPHER_SUITE_CCMP:
-               return MT_CIPHER_AES_CCMP;
+               return MCU_CIPHER_AES_CCMP;
        case WLAN_CIPHER_SUITE_CCMP_256:
-               return MT_CIPHER_CCMP_256;
+               return MCU_CIPHER_CCMP_256;
        case WLAN_CIPHER_SUITE_GCMP:
-               return MT_CIPHER_GCMP;
+               return MCU_CIPHER_GCMP;
        case WLAN_CIPHER_SUITE_GCMP_256:
-               return MT_CIPHER_GCMP_256;
+               return MCU_CIPHER_GCMP_256;
        case WLAN_CIPHER_SUITE_SMS4:
-               return MT_CIPHER_WAPI;
+               return MCU_CIPHER_WAPI;
        default:
                return MT_CIPHER_NONE;
        }
@@ -147,10 +147,10 @@ mt7915_get_he_phy_cap(struct mt7915_phy *phy, struct ieee80211_vif *vif)
 }
 
 static u8
-mt7915_get_phy_mode(struct mt76_phy *mphy, struct ieee80211_vif *vif,
-                   struct ieee80211_sta *sta)
+mt7915_get_phy_mode(struct ieee80211_vif *vif, struct ieee80211_sta *sta)
 {
-       enum nl80211_band band = mphy->chandef.chan->band;
+       struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+       enum nl80211_band band = mvif->phy->mt76->chandef.chan->band;
        struct ieee80211_sta_ht_cap *ht_cap;
        struct ieee80211_sta_vht_cap *vht_cap;
        const struct ieee80211_sta_he_cap *he_cap;
@@ -163,7 +163,7 @@ mt7915_get_phy_mode(struct mt76_phy *mphy, struct ieee80211_vif *vif,
        } else {
                struct ieee80211_supported_band *sband;
 
-               sband = mphy->hw->wiphy->bands[band];
+               sband = mvif->phy->mt76->hw->wiphy->bands[band];
 
                ht_cap = &sband->ht_cap;
                vht_cap = &sband->vht_cap;
@@ -209,6 +209,112 @@ mt7915_mcu_get_sta_nss(u16 mcs_map)
        return nss - 1;
 }
 
+static void
+mt7915_mcu_set_sta_he_mcs(struct ieee80211_sta *sta, __le16 *he_mcs,
+                         const u16 *mask)
+{
+       struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
+       struct cfg80211_chan_def *chandef = &msta->vif->phy->mt76->chandef;
+       int nss, max_nss = sta->rx_nss > 3 ? 4 : sta->rx_nss;
+       u16 mcs_map;
+
+       switch (chandef->width) {
+       case NL80211_CHAN_WIDTH_80P80:
+               mcs_map = le16_to_cpu(sta->he_cap.he_mcs_nss_supp.rx_mcs_80p80);
+               break;
+       case NL80211_CHAN_WIDTH_160:
+               mcs_map = le16_to_cpu(sta->he_cap.he_mcs_nss_supp.rx_mcs_160);
+               break;
+       default:
+               mcs_map = le16_to_cpu(sta->he_cap.he_mcs_nss_supp.rx_mcs_80);
+               break;
+       }
+
+       for (nss = 0; nss < max_nss; nss++) {
+               int mcs;
+
+               switch ((mcs_map >> (2 * nss)) & 0x3) {
+               case IEEE80211_HE_MCS_SUPPORT_0_11:
+                       mcs = GENMASK(11, 0);
+                       break;
+               case IEEE80211_HE_MCS_SUPPORT_0_9:
+                       mcs = GENMASK(9, 0);
+                       break;
+               case IEEE80211_HE_MCS_SUPPORT_0_7:
+                       mcs = GENMASK(7, 0);
+                       break;
+               default:
+                       mcs = 0;
+               }
+
+               mcs = mcs ? fls(mcs & mask[nss]) - 1 : -1;
+
+               switch (mcs) {
+               case 0 ... 7:
+                       mcs = IEEE80211_HE_MCS_SUPPORT_0_7;
+                       break;
+               case 8 ... 9:
+                       mcs = IEEE80211_HE_MCS_SUPPORT_0_9;
+                       break;
+               case 10 ... 11:
+                       mcs = IEEE80211_HE_MCS_SUPPORT_0_11;
+                       break;
+               default:
+                       mcs = IEEE80211_HE_MCS_NOT_SUPPORTED;
+                       break;
+               }
+               mcs_map &= ~(0x3 << (nss * 2));
+               mcs_map |= mcs << (nss * 2);
+
+               /* only support 2ss on 160MHz */
+               if (nss > 1 && (sta->bandwidth == IEEE80211_STA_RX_BW_160))
+                       break;
+       }
+
+       *he_mcs = cpu_to_le16(mcs_map);
+}
+
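+
+mt7915_mcu_set_sta_he_mcs() above decodes the HE rx_mcs map, which packs
+one 2-bit capability field per spatial stream (0 = MCS 0-7, 1 = MCS 0-9,
+2 = MCS 0-11, 3 = not supported). A standalone sketch of the decode (the
+sample map value is illustrative):
+
+    #include <stdio.h>
+
+    int main(void)
+    {
+        /* Illustrative map: nss1 = MCS 0-9, nss2 = MCS 0-11, others
+         * unsupported.
+         */
+        unsigned short mcs_map = 0xfff9;
+
+        for (int nss = 0; nss < 8; nss++) {
+            int v = (mcs_map >> (2 * nss)) & 0x3;
+
+            printf("nss%d: %s\n", nss + 1,
+                   v == 3 ? "unsupported" :
+                   v == 2 ? "MCS 0-11" :
+                   v == 1 ? "MCS 0-9" : "MCS 0-7");
+        }
+        return 0;
+    }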
+static void
+mt7915_mcu_set_sta_vht_mcs(struct ieee80211_sta *sta, __le16 *vht_mcs,
+                          const u16 *mask)
+{
+       u16 mcs_map = le16_to_cpu(sta->vht_cap.vht_mcs.rx_mcs_map);
+       int nss, max_nss = sta->rx_nss > 3 ? 4 : sta->rx_nss;
+       u16 mcs;
+
+       for (nss = 0; nss < max_nss; nss++, mcs_map >>= 2) {
+               switch (mcs_map & 0x3) {
+               case IEEE80211_VHT_MCS_SUPPORT_0_9:
+                       mcs = GENMASK(9, 0);
+                       break;
+               case IEEE80211_VHT_MCS_SUPPORT_0_8:
+                       mcs = GENMASK(8, 0);
+                       break;
+               case IEEE80211_VHT_MCS_SUPPORT_0_7:
+                       mcs = GENMASK(7, 0);
+                       break;
+               default:
+                       mcs = 0;
+               }
+
+               vht_mcs[nss] = cpu_to_le16(mcs & mask[nss]);
+
+               /* only support 2ss on 160MHz */
+               if (nss > 1 && (sta->bandwidth == IEEE80211_STA_RX_BW_160))
+                       break;
+       }
+}
+
+static void
+mt7915_mcu_set_sta_ht_mcs(struct ieee80211_sta *sta, u8 *ht_mcs,
+                         const u8 *mask)
+{
+       int nss, max_nss = sta->rx_nss > 3 ? 4 : sta->rx_nss;
+
+       for (nss = 0; nss < max_nss; nss++)
+               ht_mcs[nss] = sta->ht_cap.mcs.rx_mask[nss] & mask[nss];
+}
+
 static int
 mt7915_mcu_parse_response(struct mt76_dev *mdev, int cmd,
                          struct sk_buff *skb, int seq)
@@ -349,6 +455,24 @@ mt7915_mcu_rx_csa_notify(struct mt7915_dev *dev, struct sk_buff *skb)
                        mt7915_mcu_csa_finish, mphy->hw);
 }
 
+static void
+mt7915_mcu_rx_thermal_notify(struct mt7915_dev *dev, struct sk_buff *skb)
+{
+       struct mt76_phy *mphy = &dev->mt76.phy;
+       struct mt7915_mcu_thermal_notify *t;
+       struct mt7915_phy *phy;
+
+       t = (struct mt7915_mcu_thermal_notify *)skb->data;
+       if (t->ctrl.ctrl_id != THERMAL_PROTECT_ENABLE)
+               return;
+
+       if (t->ctrl.band_idx && dev->mt76.phy2)
+               mphy = dev->mt76.phy2;
+
+       phy = (struct mt7915_phy *)mphy->priv;
+       phy->throttle_state = t->ctrl.duty.duty_cycle;
+}
+
 static void
 mt7915_mcu_rx_radar_detected(struct mt7915_dev *dev, struct sk_buff *skb)
 {
@@ -469,6 +593,7 @@ mt7915_mcu_tx_rate_report(struct mt7915_dev *dev, struct sk_buff *skb)
        u16 attempts = le16_to_cpu(ra->attempts);
        u16 curr = le16_to_cpu(ra->curr_rate);
        u16 wcidx = le16_to_cpu(ra->wlan_idx);
+       struct ieee80211_tx_status status = {};
        struct mt76_phy *mphy = &dev->mphy;
        struct mt7915_sta_stats *stats;
        struct mt7915_sta *msta;
@@ -500,6 +625,13 @@ mt7915_mcu_tx_rate_report(struct mt7915_dev *dev, struct sk_buff *skb)
 
                stats->per = 1000 * (attempts - success) / attempts;
        }
+
+       status.sta = wcid_to_sta(wcid);
+       if (!status.sta)
+               return;
+
+       status.rate = &stats->tx_rate;
+       ieee80211_tx_status_ext(mphy->hw, &status);
 }
 
 static void
@@ -531,6 +663,9 @@ mt7915_mcu_rx_ext_event(struct mt7915_dev *dev, struct sk_buff *skb)
        struct mt7915_mcu_rxd *rxd = (struct mt7915_mcu_rxd *)skb->data;
 
        switch (rxd->ext_eid) {
+       case MCU_EXT_EVENT_THERMAL_PROTECT:
+               mt7915_mcu_rx_thermal_notify(dev, skb);
+               break;
        case MCU_EXT_EVENT_RDD_REPORT:
                mt7915_mcu_rx_radar_detected(dev, skb);
                break;
@@ -733,7 +868,7 @@ mt7915_mcu_bss_basic_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
                memcpy(bss->bssid, vif->bss_conf.bssid, ETH_ALEN);
                bss->bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int);
                bss->dtim_period = vif->bss_conf.dtim_period;
-               bss->phy_mode = mt7915_get_phy_mode(phy->mt76, vif, NULL);
+               bss->phy_mode = mt7915_get_phy_mode(vif, NULL);
        } else {
                memcpy(bss->bssid, phy->mt76->macaddr, ETH_ALEN);
        }
@@ -1072,14 +1207,14 @@ mt7915_mcu_sta_key_tlv(struct mt7915_sta *msta, struct sk_buff *skb,
                sec_key = &sec->key[0];
                sec_key->cipher_len = sizeof(*sec_key);
 
-               if (cipher == MT_CIPHER_BIP_CMAC_128) {
-                       sec_key->cipher_id = MT_CIPHER_AES_CCMP;
+               if (cipher == MCU_CIPHER_BIP_CMAC_128) {
+                       sec_key->cipher_id = MCU_CIPHER_AES_CCMP;
                        sec_key->key_id = bip->keyidx;
                        sec_key->key_len = 16;
                        memcpy(sec_key->key, bip->key, 16);
 
                        sec_key = &sec->key[1];
-                       sec_key->cipher_id = MT_CIPHER_BIP_CMAC_128;
+                       sec_key->cipher_id = MCU_CIPHER_BIP_CMAC_128;
                        sec_key->cipher_len = sizeof(*sec_key);
                        sec_key->key_len = 16;
                        memcpy(sec_key->key, key->key, 16);
@@ -1091,14 +1226,14 @@ mt7915_mcu_sta_key_tlv(struct mt7915_sta *msta, struct sk_buff *skb,
                        sec_key->key_len = key->keylen;
                        memcpy(sec_key->key, key->key, key->keylen);
 
-                       if (cipher == MT_CIPHER_TKIP) {
+                       if (cipher == MCU_CIPHER_TKIP) {
                                /* Rx/Tx MIC keys are swapped */
                                memcpy(sec_key->key + 16, key->key + 24, 8);
                                memcpy(sec_key->key + 24, key->key + 16, 8);
                        }
 
                        /* store key_conf for BIP batch update */
-                       if (cipher == MT_CIPHER_AES_CCMP) {
+                       if (cipher == MCU_CIPHER_AES_CCMP) {
                                memcpy(bip->key, key->key, key->keylen);
                                bip->keyidx = key->keyidx;
                        }
@@ -1336,8 +1471,11 @@ mt7915_mcu_sta_basic_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
 static void
 mt7915_mcu_sta_he_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
 {
+       struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
        struct ieee80211_sta_he_cap *he_cap = &sta->he_cap;
        struct ieee80211_he_cap_elem *elem = &he_cap->he_cap_elem;
+       enum nl80211_band band = msta->vif->phy->mt76->chandef.chan->band;
+       const u16 *mcs_mask = msta->vif->bitrate_mask.control[band].he_mcs;
        struct sta_rec_he *he;
        struct tlv *tlv;
        u32 cap = 0;
@@ -1428,15 +1566,18 @@ mt7915_mcu_sta_he_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
        case IEEE80211_STA_RX_BW_160:
                if (elem->phy_cap_info[0] &
                    IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)
-                       he->max_nss_mcs[CMD_HE_MCS_BW8080] =
-                               he_cap->he_mcs_nss_supp.rx_mcs_80p80;
+                       mt7915_mcu_set_sta_he_mcs(sta,
+                                                 &he->max_nss_mcs[CMD_HE_MCS_BW8080],
+                                                 mcs_mask);
 
-               he->max_nss_mcs[CMD_HE_MCS_BW160] =
-                               he_cap->he_mcs_nss_supp.rx_mcs_160;
+               mt7915_mcu_set_sta_he_mcs(sta,
+                                         &he->max_nss_mcs[CMD_HE_MCS_BW160],
+                                         mcs_mask);
                fallthrough;
        default:
-               he->max_nss_mcs[CMD_HE_MCS_BW80] =
-                               he_cap->he_mcs_nss_supp.rx_mcs_80;
+               mt7915_mcu_set_sta_he_mcs(sta,
+                                         &he->max_nss_mcs[CMD_HE_MCS_BW80],
+                                         mcs_mask);
                break;
        }
 
@@ -1544,27 +1685,18 @@ mt7915_mcu_sta_muru_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
                HE_PHY(CAP2_UL_MU_PARTIAL_MU_MIMO, elem->phy_cap_info[2]);
 }
 
-static int
-mt7915_mcu_add_mu(struct mt7915_dev *dev, struct ieee80211_vif *vif,
-                 struct ieee80211_sta *sta)
+static void
+mt7915_mcu_sta_vht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
 {
-       struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
-       struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
-       struct sk_buff *skb;
-       int len = sizeof(struct sta_req_hdr) + sizeof(struct sta_rec_muru);
-
-       if (!sta->vht_cap.vht_supported && !sta->he_cap.has_he)
-               return 0;
-
-       skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta, len);
-       if (IS_ERR(skb))
-               return PTR_ERR(skb);
+       struct sta_rec_vht *vht;
+       struct tlv *tlv;
 
-       /* starec muru */
-       mt7915_mcu_sta_muru_tlv(skb, sta);
+       tlv = mt7915_mcu_add_tlv(skb, STA_REC_VHT, sizeof(*vht));
 
-       return mt76_mcu_skb_send_msg(&dev->mt76, skb,
-                                    MCU_EXT_CMD(STA_REC_UPDATE), true);
+       vht = (struct sta_rec_vht *)tlv;
+       vht->vht_cap = cpu_to_le32(sta->vht_cap.cap);
+       vht->vht_rx_mcs_map = sta->vht_cap.vht_mcs.rx_mcs_map;
+       vht->vht_tx_mcs_map = sta->vht_cap.vht_mcs.tx_mcs_map;
 }
 
 static void
@@ -1616,17 +1748,6 @@ mt7915_mcu_sta_tlv(struct mt7915_dev *dev, struct sk_buff *skb,
                        mt7915_mcu_sta_amsdu_tlv(skb, sta);
        }
 
-       /* starec vht */
-       if (sta->vht_cap.vht_supported) {
-               struct sta_rec_vht *vht;
-
-               tlv = mt7915_mcu_add_tlv(skb, STA_REC_VHT, sizeof(*vht));
-               vht = (struct sta_rec_vht *)tlv;
-               vht->vht_cap = cpu_to_le32(sta->vht_cap.cap);
-               vht->vht_rx_mcs_map = sta->vht_cap.vht_mcs.rx_mcs_map;
-               vht->vht_tx_mcs_map = sta->vht_cap.vht_mcs.tx_mcs_map;
-       }
-
        /* starec he */
        if (sta->he_cap.has_he)
                mt7915_mcu_sta_he_tlv(skb, sta);
@@ -2016,26 +2137,21 @@ mt7915_mcu_add_txbf(struct mt7915_dev *dev, struct ieee80211_vif *vif,
                vc = mt7915_get_he_phy_cap(phy, vif);
                ve = &vc->he_cap_elem;
 
-               ebfee = !!((HE_PHY(CAP3_SU_BEAMFORMER, pe->phy_cap_info[3]) ||
-                           HE_PHY(CAP4_MU_BEAMFORMER, pe->phy_cap_info[4])) &&
+               ebfee = !!(HE_PHY(CAP3_SU_BEAMFORMER, pe->phy_cap_info[3]) &&
                           HE_PHY(CAP4_SU_BEAMFORMEE, ve->phy_cap_info[4]));
-               ebf = !!((HE_PHY(CAP3_SU_BEAMFORMER, ve->phy_cap_info[3]) ||
-                         HE_PHY(CAP4_MU_BEAMFORMER, ve->phy_cap_info[4])) &&
+               ebf = !!(HE_PHY(CAP3_SU_BEAMFORMER, ve->phy_cap_info[3]) &&
                         HE_PHY(CAP4_SU_BEAMFORMEE, pe->phy_cap_info[4]));
        } else if (sta->vht_cap.vht_supported) {
                struct ieee80211_sta_vht_cap *pc;
                struct ieee80211_sta_vht_cap *vc;
-               u32 cr, ce;
 
                pc = &sta->vht_cap;
                vc = &phy->mt76->sband_5g.sband.vht_cap;
-               cr = IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
-                    IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE;
-               ce = IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
-                    IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE;
 
-               ebfee = !!((pc->cap & cr) && (vc->cap & ce));
-               ebf = !!((vc->cap & cr) && (pc->cap & ce));
+               ebfee = !!((pc->cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE) &&
+                          (vc->cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE));
+               ebf = !!((vc->cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE) &&
+                        (pc->cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE));
        }
 
        /* must keep each tag independent */
@@ -2079,57 +2195,47 @@ static void
 mt7915_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb, struct mt7915_dev *dev,
                             struct ieee80211_vif *vif, struct ieee80211_sta *sta)
 {
-       struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
-       struct mt76_phy *mphy = &dev->mphy;
-       enum nl80211_band band;
+       struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+       struct cfg80211_chan_def *chandef = &mvif->phy->mt76->chandef;
+       struct cfg80211_bitrate_mask *mask = &mvif->bitrate_mask;
+       enum nl80211_band band = chandef->chan->band;
        struct sta_rec_ra *ra;
        struct tlv *tlv;
-       u32 supp_rate, n_rates, cap = sta->wme ? STA_CAP_WMM : 0;
-       u8 i, nss = sta->rx_nss, mcs = 0;
+       u32 supp_rate = sta->supp_rates[band];
+       u32 cap = sta->wme ? STA_CAP_WMM : 0;
 
        tlv = mt7915_mcu_add_tlv(skb, STA_REC_RA, sizeof(*ra));
        ra = (struct sta_rec_ra *)tlv;
 
-       if (msta->wcid.ext_phy && dev->mt76.phy2)
-               mphy = dev->mt76.phy2;
-
-       band = mphy->chandef.chan->band;
-       supp_rate = sta->supp_rates[band];
-       n_rates = hweight32(supp_rate);
-
        ra->valid = true;
        ra->auto_rate = true;
-       ra->phy_mode = mt7915_get_phy_mode(mphy, vif, sta);
-       ra->channel = mphy->chandef.chan->hw_value;
+       ra->phy_mode = mt7915_get_phy_mode(vif, sta);
+       ra->channel = chandef->chan->hw_value;
        ra->bw = sta->bandwidth;
-       ra->rate_len = n_rates;
        ra->phy.bw = sta->bandwidth;
 
-       if (n_rates) {
+       if (supp_rate) {
+               supp_rate &= mask->control[band].legacy;
+               ra->rate_len = hweight32(supp_rate);
+
                if (band == NL80211_BAND_2GHZ) {
                        ra->supp_mode = MODE_CCK;
                        ra->supp_cck_rate = supp_rate & GENMASK(3, 0);
-                       ra->phy.type = MT_PHY_TYPE_CCK;
 
-                       if (n_rates > 4) {
+                       if (ra->rate_len > 4) {
                                ra->supp_mode |= MODE_OFDM;
                                ra->supp_ofdm_rate = supp_rate >> 4;
-                               ra->phy.type = MT_PHY_TYPE_OFDM;
                        }
                } else {
                        ra->supp_mode = MODE_OFDM;
                        ra->supp_ofdm_rate = supp_rate;
-                       ra->phy.type = MT_PHY_TYPE_OFDM;
                }
        }
 
        if (sta->ht_cap.ht_supported) {
-               for (i = 0; i < nss; i++)
-                       ra->ht_mcs[i] = sta->ht_cap.mcs.rx_mask[i];
+               const u8 *mcs_mask = mask->control[band].ht_mcs;
 
-               ra->supp_ht_mcs = *(__le32 *)ra->ht_mcs;
                ra->supp_mode |= MODE_HT;
-               mcs = hweight32(le32_to_cpu(ra->supp_ht_mcs)) - 1;
                ra->af = sta->ht_cap.ampdu_factor;
                ra->ht_gf = !!(sta->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD);
 
@@ -2144,13 +2250,16 @@ mt7915_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb, struct mt7915_dev *dev,
                        cap |= STA_CAP_RX_STBC;
                if (sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING)
                        cap |= STA_CAP_LDPC;
+
+               mt7915_mcu_set_sta_ht_mcs(sta, ra->ht_mcs, mcs_mask);
+               ra->supp_ht_mcs = *(__le32 *)ra->ht_mcs;
        }
 
        if (sta->vht_cap.vht_supported) {
-               u16 mcs_map = le16_to_cpu(sta->vht_cap.vht_mcs.rx_mcs_map);
-               u16 vht_mcs;
-               u8 af, mcs_prev;
+               const u16 *mcs_mask = mask->control[band].vht_mcs;
+               u8 af;
 
+               ra->supp_mode |= MODE_VHT;
                af = FIELD_GET(IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK,
                               sta->vht_cap.cap);
                ra->af = max_t(u8, ra->af, af);
@@ -2167,33 +2276,7 @@ mt7915_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb, struct mt7915_dev *dev,
                if (sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC)
                        cap |= STA_CAP_VHT_LDPC;
 
-               ra->supp_mode |= MODE_VHT;
-               for (mcs = 0, i = 0; i < nss; i++, mcs_map >>= 2) {
-                       switch (mcs_map & 0x3) {
-                       case IEEE80211_VHT_MCS_SUPPORT_0_9:
-                               vht_mcs = GENMASK(9, 0);
-                               break;
-                       case IEEE80211_VHT_MCS_SUPPORT_0_8:
-                               vht_mcs = GENMASK(8, 0);
-                               break;
-                       case IEEE80211_VHT_MCS_SUPPORT_0_7:
-                               vht_mcs = GENMASK(7, 0);
-                               break;
-                       default:
-                               vht_mcs = 0;
-                       }
-
-                       ra->supp_vht_mcs[i] = cpu_to_le16(vht_mcs);
-
-                       mcs_prev = hweight16(vht_mcs) - 1;
-                       if (mcs_prev > mcs)
-                               mcs = mcs_prev;
-
-                       /* only support 2ss on 160MHz */
-                       if (i > 1 && (ra->bw == CMD_CBW_160MHZ ||
-                                     ra->bw == CMD_CBW_8080MHZ))
-                               break;
-               }
+               mt7915_mcu_set_sta_vht_mcs(sta, ra->supp_vht_mcs, mcs_mask);
        }
 
        if (sta->he_cap.has_he) {
@@ -2201,28 +2284,7 @@ mt7915_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb, struct mt7915_dev *dev,
                cap |= STA_CAP_HE;
        }
 
-       ra->sta_status = cpu_to_le32(cap);
-
-       switch (BIT(fls(ra->supp_mode) - 1)) {
-       case MODE_VHT:
-               ra->phy.type = MT_PHY_TYPE_VHT;
-               ra->phy.mcs = mcs;
-               ra->phy.nss = nss;
-               ra->phy.stbc = !!(sta->vht_cap.cap & IEEE80211_VHT_CAP_TXSTBC);
-               ra->phy.ldpc = !!(sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC);
-               ra->phy.sgi =
-                       !!(sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80);
-               break;
-       case MODE_HT:
-               ra->phy.type = MT_PHY_TYPE_HT;
-               ra->phy.mcs = mcs;
-               ra->phy.ldpc = sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING;
-               ra->phy.stbc = !!(sta->ht_cap.cap & IEEE80211_HT_CAP_TX_STBC);
-               ra->phy.sgi = !!(sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20);
-               break;
-       default:
-               break;
-       }
+       ra->sta_cap = cpu_to_le32(cap);
 }
 
 int mt7915_mcu_add_rate_ctrl(struct mt7915_dev *dev, struct ieee80211_vif *vif,
@@ -2243,6 +2305,87 @@ int mt7915_mcu_add_rate_ctrl(struct mt7915_dev *dev, struct ieee80211_vif *vif,
                                     MCU_EXT_CMD(STA_REC_UPDATE), true);
 }
 
+int mt7915_mcu_add_he(struct mt7915_dev *dev, struct ieee80211_vif *vif,
+                     struct ieee80211_sta *sta)
+{
+       struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+       struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
+       struct sk_buff *skb;
+       int len;
+
+       if (!sta->he_cap.has_he)
+               return 0;
+
+       len = sizeof(struct sta_req_hdr) + sizeof(struct sta_rec_he);
+
+       skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta, len);
+       if (IS_ERR(skb))
+               return PTR_ERR(skb);
+
+       mt7915_mcu_sta_he_tlv(skb, sta);
+
+       return mt76_mcu_skb_send_msg(&dev->mt76, skb,
+                                    MCU_EXT_CMD(STA_REC_UPDATE), true);
+}
+
+static int
+mt7915_mcu_add_group(struct mt7915_dev *dev, struct ieee80211_vif *vif,
+                    struct ieee80211_sta *sta)
+{
+#define MT_STA_BSS_GROUP               1
+       struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+       struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
+       struct {
+               __le32 action;
+               u8 wlan_idx_lo;
+               u8 status;
+               u8 wlan_idx_hi;
+               u8 rsv0[5];
+               __le32 val;
+               u8 rsv1[8];
+       } __packed req = {
+               .action = cpu_to_le32(MT_STA_BSS_GROUP),
+               .wlan_idx_lo = to_wcid_lo(msta->wcid.idx),
+               .wlan_idx_hi = to_wcid_hi(msta->wcid.idx),
+               .val = cpu_to_le32(mvif->idx % 16),
+       };
+
+       return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(SET_DRR_CTRL), &req,
+                                sizeof(req), true);
+}
+
+static int
+mt7915_mcu_add_mu(struct mt7915_dev *dev, struct ieee80211_vif *vif,
+                 struct ieee80211_sta *sta)
+{
+       struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+       struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
+       struct sk_buff *skb;
+       int ret;
+
+       if (!sta->vht_cap.vht_supported && !sta->he_cap.has_he)
+               return 0;
+
+       ret = mt7915_mcu_add_group(dev, vif, sta);
+       if (ret)
+               return ret;
+
+       skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta,
+                                      MT7915_STA_UPDATE_MAX_SIZE);
+       if (IS_ERR(skb))
+               return PTR_ERR(skb);
+
+       /* wait until TxBF and MU are ready before updating starec vht */
+
+       /* starec muru */
+       mt7915_mcu_sta_muru_tlv(skb, sta);
+       /* starec vht */
+       mt7915_mcu_sta_vht_tlv(skb, sta);
+
+       return mt76_mcu_skb_send_msg(&dev->mt76, skb,
+                                    MCU_EXT_CMD(STA_REC_UPDATE), true);
+}
+
 int mt7915_mcu_add_sta_adv(struct mt7915_dev *dev, struct ieee80211_vif *vif,
                           struct ieee80211_sta *sta, bool enable)
 {
@@ -2253,17 +2396,14 @@ int mt7915_mcu_add_sta_adv(struct mt7915_dev *dev, struct ieee80211_vif *vif,
 
        /* must keep the order */
        ret = mt7915_mcu_add_txbf(dev, vif, sta, enable);
-       if (ret)
+       if (ret || !enable)
                return ret;
 
        ret = mt7915_mcu_add_mu(dev, vif, sta);
        if (ret)
                return ret;
 
-       if (enable)
-               return mt7915_mcu_add_rate_ctrl(dev, vif, sta);
-
-       return 0;
+       return mt7915_mcu_add_rate_ctrl(dev, vif, sta);
 }
 
 int mt7915_mcu_add_sta(struct mt7915_dev *dev, struct ieee80211_vif *vif,
@@ -2432,7 +2572,7 @@ mt7915_mcu_beacon_cont(struct mt7915_dev *dev, struct sk_buff *rskb,
                cont->csa_ofs = cpu_to_le16(offs->cntdwn_counter_offs[0] - 4);
 
        buf = (u8 *)tlv + sizeof(*cont);
-       mt7915_mac_write_txwi(dev, (__le32 *)buf, skb, wcid, NULL,
+       mt7915_mac_write_txwi(dev, (__le32 *)buf, skb, wcid, 0, NULL,
                              true);
        memcpy(buf + MT_TXD_SIZE, skb->data, skb->len);
 }
@@ -3307,7 +3447,8 @@ int mt7915_mcu_set_eeprom(struct mt7915_dev *dev)
 int mt7915_mcu_get_eeprom(struct mt7915_dev *dev, u32 offset)
 {
        struct mt7915_mcu_eeprom_info req = {
-               .addr = cpu_to_le32(round_down(offset, 16)),
+               .addr = cpu_to_le32(round_down(offset,
+                                   MT7915_EEPROM_BLOCK_SIZE)),
        };
        struct mt7915_mcu_eeprom_info *res;
        struct sk_buff *skb;
@@ -3321,7 +3462,7 @@ int mt7915_mcu_get_eeprom(struct mt7915_dev *dev, u32 offset)
 
        res = (struct mt7915_mcu_eeprom_info *)skb->data;
        buf = dev->mt76.eeprom.data + le32_to_cpu(res->addr);
-       memcpy(buf, res->data, 16);
+       memcpy(buf, res->data, MT7915_EEPROM_BLOCK_SIZE);
        dev_kfree_skb(skb);
 
        return 0;
@@ -3440,8 +3581,9 @@ int mt7915_mcu_apply_tx_dpd(struct mt7915_phy *phy)
 {
        struct mt7915_dev *dev = phy->dev;
        struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
-       u16 total = 2, idx, center_freq = chandef->center_freq1;
+       u16 total = 2, center_freq = chandef->center_freq1;
        u8 *cal = dev->cal, *eep = dev->mt76.eeprom.data;
+       int idx;
 
        if (!(eep[MT_EE_DO_PRE_CAL] & MT_EE_WIFI_CAL_DPD))
                return 0;
@@ -3469,22 +3611,128 @@ int mt7915_mcu_apply_tx_dpd(struct mt7915_phy *phy)
        return 0;
 }
 
-int mt7915_mcu_get_temperature(struct mt7915_dev *dev, int index)
+int mt7915_mcu_get_chan_mib_info(struct mt7915_phy *phy, bool chan_switch)
+{
+       /* strict order */
+       static const enum mt7915_chan_mib_offs offs[] = {
+               MIB_BUSY_TIME, MIB_TX_TIME, MIB_RX_TIME, MIB_OBSS_AIRTIME
+       };
+       struct mt76_channel_state *state = phy->mt76->chan_state;
+       struct mt76_channel_state *state_ts = &phy->state_ts;
+       struct mt7915_dev *dev = phy->dev;
+       struct mt7915_mcu_mib *res, req[4];
+       struct sk_buff *skb;
+       int i, ret;
+
+       for (i = 0; i < 4; i++) {
+               req[i].band = cpu_to_le32(phy != &dev->phy);
+               req[i].offs = cpu_to_le32(offs[i]);
+       }
+
+       ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_EXT_CMD(GET_MIB_INFO),
+                                       req, sizeof(req), true, &skb);
+       if (ret)
+               return ret;
+
+       res = (struct mt7915_mcu_mib *)(skb->data + 20);
+
+       if (chan_switch)
+               goto out;
+
+#define __res_u64(s) le64_to_cpu(res[s].data)
+       state->cc_busy += __res_u64(0) - state_ts->cc_busy;
+       state->cc_tx += __res_u64(1) - state_ts->cc_tx;
+       state->cc_bss_rx += __res_u64(2) - state_ts->cc_bss_rx;
+       state->cc_rx += __res_u64(2) + __res_u64(3) - state_ts->cc_rx;
+
+out:
+       state_ts->cc_busy = __res_u64(0);
+       state_ts->cc_tx = __res_u64(1);
+       state_ts->cc_bss_rx = __res_u64(2);
+       state_ts->cc_rx = __res_u64(2) + __res_u64(3);
+#undef __res_u64
+
+       dev_kfree_skb(skb);
+
+       return 0;
+}
+
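For reference, the accounting above treats the firmware MIB counters as free-running: phy->state_ts caches the previous snapshot, each poll adds only the delta to the cumulative channel state, and on a channel switch only the snapshot is refreshed so the old channel's airtime is not carried over. A hedged paraphrase of the pattern:

	/* chan_switch == false:  state->cc_busy += now.busy - last.busy;
	 * always:                last = now;
	 * chan_switch == true:   skip the "+=" step and keep only the
	 *                        snapshot refresh, so the new channel
	 *                        starts from a clean baseline
	 */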
+int mt7915_mcu_get_temperature(struct mt7915_phy *phy)
 {
+       struct mt7915_dev *dev = phy->dev;
        struct {
                u8 ctrl_id;
                u8 action;
-               u8 band;
+               u8 dbdc_idx;
                u8 rsv[5];
        } req = {
                .ctrl_id = THERMAL_SENSOR_TEMP_QUERY,
-               .action = index,
+               .dbdc_idx = phy != &dev->phy,
        };
 
        return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(THERMAL_CTRL), &req,
                                 sizeof(req), true);
 }
 
+int mt7915_mcu_set_thermal_throttling(struct mt7915_phy *phy, u8 state)
+{
+       struct mt7915_dev *dev = phy->dev;
+       struct {
+               struct mt7915_mcu_thermal_ctrl ctrl;
+
+               __le32 trigger_temp;
+               __le32 restore_temp;
+               __le16 sustain_time;
+               u8 rsv[2];
+       } __packed req = {
+               .ctrl = {
+                       .band_idx = phy != &dev->phy,
+               },
+       };
+       int level;
+
+#define TRIGGER_TEMPERATURE    122
+#define RESTORE_TEMPERATURE    116
+#define SUSTAIN_PERIOD         10
+
+       if (!state) {
+               req.ctrl.ctrl_id = THERMAL_PROTECT_DISABLE;
+               goto out;
+       }
+
+       /* set duty cycle and level */
+       for (level = 0; level < 4; level++) {
+               int ret;
+
+               req.ctrl.ctrl_id = THERMAL_PROTECT_DUTY_CONFIG;
+               req.ctrl.duty.duty_level = level;
+               req.ctrl.duty.duty_cycle = state;
+               state = state * 4 / 5;
+
+               ret = mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(THERMAL_PROT),
+                                       &req, sizeof(req.ctrl), false);
+               if (ret)
+                       return ret;
+       }
+
+       /* we currently use fixed values for throttling; a thermal zone
+        * with dynamic trip points would be better in the long run.
+        */
+
+       /* set high-temperature trigger threshold */
+       req.ctrl.ctrl_id = THERMAL_PROTECT_ENABLE;
+       req.trigger_temp = cpu_to_le32(TRIGGER_TEMPERATURE);
+       req.restore_temp = cpu_to_le32(RESTORE_TEMPERATURE);
+       req.sustain_time = cpu_to_le16(SUSTAIN_PERIOD);
+
+out:
+       req.ctrl.type.protect_type = 1;
+       req.ctrl.type.trigger_type = 1;
+
+       return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(THERMAL_PROT),
+                                &req, sizeof(req), false);
+}
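Each pass of the duty loop above scales the requested duty cycle by a further 4/5, so levels 0-3 form a descending throttle ladder for the firmware to step through. The phy->cdev and throttle_state fields added in the header below suggest this helper is driven from a thermal cooling device; a minimal sketch of such a set_cur_state callback, with hypothetical naming:

static int
mt7915_thermal_set_cur_throttle_state(struct thermal_cooling_device *cdev,
				      unsigned long state)
{
	struct mt7915_phy *phy = cdev->devdata;	/* set at registration */
	int ret;

	if (state > MT7915_THERMAL_THROTTLE_MAX)
		return -EINVAL;

	/* state is the granted tx duty cycle, in percent */
	ret = mt7915_mcu_set_thermal_throttling(phy, state);
	if (ret)
		return ret;

	phy->throttle_state = state;

	return 0;
}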
+
 int mt7915_mcu_get_tx_rate(struct mt7915_dev *dev, u32 cmd, u16 wlan_idx)
 {
        struct {
@@ -3505,7 +3753,6 @@ int mt7915_mcu_get_tx_rate(struct mt7915_dev *dev, u32 cmd, u16 wlan_idx)
 
 int mt7915_mcu_set_txpower_sku(struct mt7915_phy *phy)
 {
-#define MT7915_SKU_RATE_NUM            161
        struct mt7915_dev *dev = phy->dev;
        struct mt76_phy *mphy = phy->mt76;
        struct ieee80211_hw *hw = mphy->hw;
@@ -3555,6 +3802,39 @@ int mt7915_mcu_set_txpower_sku(struct mt7915_phy *phy)
                                 sizeof(req), true);
 }
 
+int mt7915_mcu_get_txpower_sku(struct mt7915_phy *phy, s8 *txpower, int len)
+{
+#define RATE_POWER_INFO        2
+       struct mt7915_dev *dev = phy->dev;
+       struct {
+               u8 format_id;
+               u8 category;
+               u8 band;
+               u8 _rsv;
+       } __packed req = {
+               .format_id = 7,
+               .category = RATE_POWER_INFO,
+               .band = phy != &dev->phy,
+       };
+       s8 res[MT7915_SKU_RATE_NUM][2];
+       struct sk_buff *skb;
+       int ret, i;
+
+       ret = mt76_mcu_send_and_get_msg(&dev->mt76,
+                                       MCU_EXT_CMD(TX_POWER_FEATURE_CTRL),
+                                       &req, sizeof(req), true, &skb);
+       if (ret)
+               return ret;
+
+       memcpy(res, skb->data + 4, sizeof(res));
+       for (i = 0; i < len; i++)
+               txpower[i] = res[i][req.band];
+
+       dev_kfree_skb(skb);
+
+       return 0;
+}
+
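A hedged caller sketch for the new read-back helper (actual call sites are outside this hunk): res[][2] above is indexed [rate][band], and the caller's buffer should hold up to MT7915_SKU_RATE_NUM entries.

	s8 txpower[MT7915_SKU_RATE_NUM];
	int ret;

	ret = mt7915_mcu_get_txpower_sku(phy, txpower, ARRAY_SIZE(txpower));
	if (ret)
		return ret;
	/* txpower[i] now holds the per-rate power limit, in firmware units */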
 int mt7915_mcu_set_test_param(struct mt7915_dev *dev, u8 param, bool test_mode,
                              u8 en)
 {
@@ -3613,57 +3893,50 @@ int mt7915_mcu_set_ser(struct mt7915_dev *dev, u8 action, u8 set, u8 band)
                                 &req, sizeof(req), false);
 }
 
-int mt7915_mcu_set_txbf_module(struct mt7915_dev *dev)
-{
-#define MT_BF_MODULE_UPDATE               25
-       struct {
-               u8 action;
-               u8 bf_num;
-               u8 bf_bitmap;
-               u8 bf_sel[8];
-               u8 rsv[8];
-       } __packed req = {
-               .action = MT_BF_MODULE_UPDATE,
-               .bf_num = 2,
-               .bf_bitmap = GENMASK(1, 0),
-       };
-
-       return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(TXBF_ACTION), &req,
-                                sizeof(req), true);
-}
-
-int mt7915_mcu_set_txbf_type(struct mt7915_dev *dev)
+int mt7915_mcu_set_txbf(struct mt7915_dev *dev, u8 action)
 {
-#define MT_BF_TYPE_UPDATE              20
        struct {
                u8 action;
-               bool ebf;
-               bool ibf;
-               u8 rsv;
+               union {
+                       struct {
+                               u8 snd_mode;
+                               u8 sta_num;
+                               u8 rsv;
+                               u8 wlan_idx[4];
+                               __le32 snd_period;      /* ms */
+                       } __packed snd;
+                       struct {
+                               bool ebf;
+                               bool ibf;
+                               u8 rsv;
+                       } __packed type;
+                       struct {
+                               u8 bf_num;
+                               u8 bf_bitmap;
+                               u8 bf_sel[8];
+                               u8 rsv[5];
+                       } __packed mod;
+               };
        } __packed req = {
-               .action = MT_BF_TYPE_UPDATE,
-               .ebf = true,
-               .ibf = dev->ibf,
+               .action = action,
        };
 
-       return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(TXBF_ACTION), &req,
-                                sizeof(req), true);
-}
-
-int mt7915_mcu_set_txbf_sounding(struct mt7915_dev *dev)
-{
-#define MT_BF_PROCESSING               4
-       struct {
-               u8 action;
-               u8 snd_mode;
-               u8 sta_num;
-               u8 rsv;
-               u8 wlan_idx[4];
-               __le32 snd_period;      /* ms */
-       } __packed req = {
-               .action = true,
-               .snd_mode = MT_BF_PROCESSING,
-       };
+#define MT_BF_PROCESSING       4
+       switch (action) {
+       case MT_BF_SOUNDING_ON:
+               req.snd.snd_mode = MT_BF_PROCESSING;
+               break;
+       case MT_BF_TYPE_UPDATE:
+               req.type.ebf = true;
+               req.type.ibf = dev->ibf;
+               break;
+       case MT_BF_MODULE_UPDATE:
+               req.mod.bf_num = 2;
+               req.mod.bf_bitmap = GENMASK(1, 0);
+               break;
+       default:
+               return -EINVAL;
+       }
 
        return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(TXBF_ACTION), &req,
                                 sizeof(req), true);
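The three single-purpose TXBF helpers collapse into one action-dispatched entry point; existing init-time callers (outside this hunk) would now read roughly as follows, an illustrative usage sketch:

	ret = mt7915_mcu_set_txbf(dev, MT_BF_MODULE_UPDATE);
	if (ret)
		return ret;

	ret = mt7915_mcu_set_txbf(dev, MT_BF_TYPE_UPDATE);
	if (ret)
		return ret;

	ret = mt7915_mcu_set_txbf(dev, MT_BF_SOUNDING_ON);
	if (ret)
		return ret;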
index 42582a6..edd3ba3 100644
@@ -68,6 +68,29 @@ struct mt7915_mcu_rxd {
        u8 s2d_index;
 };
 
+struct mt7915_mcu_thermal_ctrl {
+       u8 ctrl_id;
+       u8 band_idx;
+       union {
+               struct {
+                       u8 protect_type; /* 1: duty admit, 2: radio off */
+                       u8 trigger_type; /* 0: low, 1: high */
+               } __packed type;
+               struct {
+                       u8 duty_level;  /* level 0~3 */
+                       u8 duty_cycle;
+               } __packed duty;
+       };
+} __packed;
+
+struct mt7915_mcu_thermal_notify {
+       struct mt7915_mcu_rxd rxd;
+
+       struct mt7915_mcu_thermal_ctrl ctrl;
+       __le32 temperature;
+       u8 rsv[8];
+} __packed;
+
 struct mt7915_mcu_csa_notify {
        struct mt7915_mcu_rxd rxd;
 
@@ -193,6 +216,19 @@ struct mt7915_mcu_phy_rx_info {
 #define MT_RA_RATE_DCM_EN              BIT(4)
 #define MT_RA_RATE_BW                  GENMASK(14, 13)
 
+struct mt7915_mcu_mib {
+       __le32 band;
+       __le32 offs;
+       __le64 data;
+} __packed;
+
+enum mt7915_chan_mib_offs {
+       MIB_BUSY_TIME = 14,
+       MIB_TX_TIME = 81,
+       MIB_RX_TIME,
+       MIB_OBSS_AIRTIME = 86
+};
+
 struct edca {
        u8 queue;
        u8 set;
@@ -262,6 +298,7 @@ enum {
        MCU_EXT_CMD_FW_LOG_2_HOST = 0x13,
        MCU_EXT_CMD_TXBF_ACTION = 0x1e,
        MCU_EXT_CMD_EFUSE_BUFFER_MODE = 0x21,
+       MCU_EXT_CMD_THERMAL_PROT = 0x23,
        MCU_EXT_CMD_STA_REC_UPDATE = 0x25,
        MCU_EXT_CMD_BSS_INFO_UPDATE = 0x26,
        MCU_EXT_CMD_EDCA_UPDATE = 0x27,
@@ -277,6 +314,7 @@ enum {
        MCU_EXT_CMD_MUAR_UPDATE = 0x48,
        MCU_EXT_CMD_SET_RX_PATH = 0x4e,
        MCU_EXT_CMD_TX_POWER_FEATURE_CTRL = 0x58,
+       MCU_EXT_CMD_GET_MIB_INFO = 0x5a,
        MCU_EXT_CMD_MWDS_SUPPORT = 0x80,
        MCU_EXT_CMD_SET_SER_TRIGGER = 0x81,
        MCU_EXT_CMD_SCS_CTRL = 0x82,
@@ -919,7 +957,7 @@ struct sta_rec_ra {
        u8 op_vht_rx_nss;
        u8 op_vht_rx_nss_type;
 
-       __le32 sta_status;
+       __le32 sta_cap;
 
        struct ra_phy phy;
 } __packed;
@@ -1034,18 +1072,17 @@ enum {
        STA_REC_MAX_NUM
 };
 
-enum mt7915_cipher_type {
-       MT_CIPHER_NONE,
-       MT_CIPHER_WEP40,
-       MT_CIPHER_WEP104,
-       MT_CIPHER_WEP128,
-       MT_CIPHER_TKIP,
-       MT_CIPHER_AES_CCMP,
-       MT_CIPHER_CCMP_256,
-       MT_CIPHER_GCMP,
-       MT_CIPHER_GCMP_256,
-       MT_CIPHER_WAPI,
-       MT_CIPHER_BIP_CMAC_128,
+enum mcu_cipher_type {
+       MCU_CIPHER_WEP40 = 1,
+       MCU_CIPHER_WEP104,
+       MCU_CIPHER_WEP128,
+       MCU_CIPHER_TKIP,
+       MCU_CIPHER_AES_CCMP,
+       MCU_CIPHER_CCMP_256,
+       MCU_CIPHER_GCMP,
+       MCU_CIPHER_GCMP_256,
+       MCU_CIPHER_WAPI,
+       MCU_CIPHER_BIP_CMAC_128,
 };
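The MT_CIPHER_* to MCU_CIPHER_* rename (the firmware numbering starts at 1 and has no NONE entry) implies a translation helper from the cfg80211 suite IDs; a sketch under that assumption, with the zero sentinel being a guess:

static u8 mt7915_mcu_get_cipher(int cipher)
{
	switch (cipher) {
	case WLAN_CIPHER_SUITE_WEP40:
		return MCU_CIPHER_WEP40;
	case WLAN_CIPHER_SUITE_WEP104:
		return MCU_CIPHER_WEP104;
	case WLAN_CIPHER_SUITE_TKIP:
		return MCU_CIPHER_TKIP;
	case WLAN_CIPHER_SUITE_CCMP:
		return MCU_CIPHER_AES_CCMP;
	case WLAN_CIPHER_SUITE_CCMP_256:
		return MCU_CIPHER_CCMP_256;
	case WLAN_CIPHER_SUITE_GCMP:
		return MCU_CIPHER_GCMP;
	case WLAN_CIPHER_SUITE_GCMP_256:
		return MCU_CIPHER_GCMP_256;
	case WLAN_CIPHER_SUITE_SMS4:
		return MCU_CIPHER_WAPI;
	case WLAN_CIPHER_SUITE_AES_CMAC:
		return MCU_CIPHER_BIP_CMAC_128;
	default:
		return 0;	/* unknown/unsupported; sentinel assumed */
	}
}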
 
 enum {
@@ -1066,11 +1103,28 @@ enum {
        THERMAL_SENSOR_TASK_CTRL,
 };
 
+enum {
+       THERMAL_PROTECT_PARAMETER_CTRL,
+       THERMAL_PROTECT_BASIC_INFO,
+       THERMAL_PROTECT_ENABLE,
+       THERMAL_PROTECT_DISABLE,
+       THERMAL_PROTECT_DUTY_CONFIG,
+       THERMAL_PROTECT_MECH_INFO,
+       THERMAL_PROTECT_DUTY_INFO,
+       THERMAL_PROTECT_STATE_ACT,
+};
+
 enum {
        MT_EBF = BIT(0),        /* explicit beamforming */
        MT_IBF = BIT(1)         /* implicit beamforming */
 };
 
+enum {
+       MT_BF_SOUNDING_ON = 1,
+       MT_BF_TYPE_UPDATE = 20,
+       MT_BF_MODULE_UPDATE = 25
+};
+
 #define MT7915_WTBL_UPDATE_MAX_SIZE    (sizeof(struct wtbl_req_hdr) +  \
                                         sizeof(struct wtbl_generic) +  \
                                         sizeof(struct wtbl_rx) +       \
index 4ea8972..3f613fa 100644
@@ -9,7 +9,7 @@
 #include "../mt76.h"
 #include "regs.h"
 
-#define MT7915_MAX_INTERFACES          32
+#define MT7915_MAX_INTERFACES          19
 #define MT7915_MAX_WMM_SETS            4
 #define MT7915_WTBL_SIZE               288
 #define MT7915_WTBL_RESERVED           (MT7915_WTBL_SIZE - 1)
@@ -31,6 +31,7 @@
 #define MT7915_ROM_PATCH               "mediatek/mt7915_rom_patch.bin"
 
 #define MT7915_EEPROM_SIZE             3584
+#define MT7915_EEPROM_BLOCK_SIZE       16
 #define MT7915_TOKEN_SIZE              8192
 
 #define MT7915_CFEND_RATE_DEFAULT      0x49    /* OFDM 24M */
 #define MT7915_5G_RATE_DEFAULT         0x4b    /* OFDM 6M */
 #define MT7915_2G_RATE_DEFAULT         0x0     /* CCK 1M */
 
+#define MT7915_THERMAL_THROTTLE_MAX    100
+
+#define MT7915_SKU_RATE_NUM            161
+
 struct mt7915_vif;
 struct mt7915_sta;
 struct mt7915_dfs_pulse;
@@ -100,6 +105,7 @@ struct mt7915_vif {
        struct mt7915_phy *phy;
 
        struct ieee80211_tx_queue_params queue_params[IEEE80211_NUM_ACS];
+       struct cfg80211_bitrate_mask bitrate_mask;
 };
 
 struct mib_stats {
@@ -126,6 +132,9 @@ struct mt7915_phy {
 
        struct ieee80211_vif *monitor_vif;
 
+       struct thermal_cooling_device *cdev;
+       u8 throttle_state;
+
        u32 rxfilter;
        u64 omac_mask;
 
@@ -141,6 +150,7 @@ struct mt7915_phy {
        u32 ampdu_ref;
 
        struct mib_stats mib;
+       struct mt76_channel_state state_ts;
        struct list_head stats_list;
 
        u8 sta_work_count;
@@ -169,6 +179,7 @@ struct mt7915_dev {
        struct mt7915_hif *hif2;
 
        const struct mt76_bus_ops *bus_ops;
+       struct tasklet_struct irq_tasklet;
        struct mt7915_phy phy;
 
        u16 chainmask;
@@ -322,6 +333,8 @@ int mt7915_mcu_add_obss_spr(struct mt7915_dev *dev, struct ieee80211_vif *vif,
                             bool enable);
 int mt7915_mcu_add_rate_ctrl(struct mt7915_dev *dev, struct ieee80211_vif *vif,
                             struct ieee80211_sta *sta);
+int mt7915_mcu_add_he(struct mt7915_dev *dev, struct ieee80211_vif *vif,
+                     struct ieee80211_sta *sta);
 int mt7915_mcu_add_smps(struct mt7915_dev *dev, struct ieee80211_vif *vif,
                        struct ieee80211_sta *sta);
 int mt7915_set_channel(struct mt7915_phy *phy);
@@ -342,9 +355,8 @@ int mt7915_mcu_set_rts_thresh(struct mt7915_phy *phy, u32 val);
 int mt7915_mcu_set_pm(struct mt7915_dev *dev, int band, int enter);
 int mt7915_mcu_set_sku_en(struct mt7915_phy *phy, bool enable);
 int mt7915_mcu_set_txpower_sku(struct mt7915_phy *phy);
-int mt7915_mcu_set_txbf_type(struct mt7915_dev *dev);
-int mt7915_mcu_set_txbf_module(struct mt7915_dev *dev);
-int mt7915_mcu_set_txbf_sounding(struct mt7915_dev *dev);
+int mt7915_mcu_get_txpower_sku(struct mt7915_phy *phy, s8 *txpower, int len);
+int mt7915_mcu_set_txbf(struct mt7915_dev *dev, u8 action);
 int mt7915_mcu_set_fcc5_lpn(struct mt7915_dev *dev, int val);
 int mt7915_mcu_set_pulse_th(struct mt7915_dev *dev,
                            const struct mt7915_dfs_pulse *pulse);
@@ -352,7 +364,9 @@ int mt7915_mcu_set_radar_th(struct mt7915_dev *dev, int index,
                            const struct mt7915_dfs_pattern *pattern);
 int mt7915_mcu_apply_group_cal(struct mt7915_dev *dev);
 int mt7915_mcu_apply_tx_dpd(struct mt7915_phy *phy);
-int mt7915_mcu_get_temperature(struct mt7915_dev *dev, int index);
+int mt7915_mcu_get_chan_mib_info(struct mt7915_phy *phy, bool chan_switch);
+int mt7915_mcu_get_temperature(struct mt7915_phy *phy);
+int mt7915_mcu_set_thermal_throttling(struct mt7915_phy *phy, u8 state);
 int mt7915_mcu_get_tx_rate(struct mt7915_dev *dev, u32 cmd, u16 wlan_idx);
 int mt7915_mcu_get_rx_rate(struct mt7915_phy *phy, struct ieee80211_vif *vif,
                           struct ieee80211_sta *sta, struct rate_info *rate);
@@ -374,9 +388,11 @@ void mt7915_dual_hif_set_irq_mask(struct mt7915_dev *dev, bool write_reg,
 static inline void mt7915_irq_enable(struct mt7915_dev *dev, u32 mask)
 {
        if (dev->hif2)
-               mt7915_dual_hif_set_irq_mask(dev, true, 0, mask);
+               mt7915_dual_hif_set_irq_mask(dev, false, 0, mask);
        else
-               mt76_set_irq_mask(&dev->mt76, MT_INT_MASK_CSR, 0, mask);
+               mt76_set_irq_mask(&dev->mt76, 0, 0, mask);
+
+       tasklet_schedule(&dev->irq_tasklet);
 }
 
 static inline void mt7915_irq_disable(struct mt7915_dev *dev, u32 mask)
@@ -392,12 +408,9 @@ void mt7915_mac_reset_counters(struct mt7915_phy *phy);
 void mt7915_mac_cca_stats_reset(struct mt7915_phy *phy);
 void mt7915_mac_enable_nf(struct mt7915_dev *dev, bool ext_phy);
 void mt7915_mac_write_txwi(struct mt7915_dev *dev, __le32 *txwi,
-                          struct sk_buff *skb, struct mt76_wcid *wcid,
+                          struct sk_buff *skb, struct mt76_wcid *wcid, int pid,
                           struct ieee80211_key_conf *key, bool beacon);
 void mt7915_mac_set_timing(struct mt7915_phy *phy);
-int mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb);
-void mt7915_mac_fill_rx_vector(struct mt7915_dev *dev, struct sk_buff *skb);
-void mt7915_mac_tx_free(struct mt7915_dev *dev, struct sk_buff *skb);
 int mt7915_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
                       struct ieee80211_sta *sta);
 void mt7915_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
@@ -417,13 +430,11 @@ void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
                         struct sk_buff *skb);
 void mt7915_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps);
 void mt7915_stats_work(struct work_struct *work);
-void mt7915_txp_skb_unmap(struct mt76_dev *dev,
-                         struct mt76_txwi_cache *txwi);
 int mt76_dfs_start_rdd(struct mt7915_dev *dev, bool force);
 int mt7915_dfs_init_radar_detector(struct mt7915_phy *phy);
 void mt7915_set_stream_he_caps(struct mt7915_phy *phy);
 void mt7915_set_stream_vht_txbf_caps(struct mt7915_phy *phy);
-void mt7915_update_channel(struct mt76_dev *mdev);
+void mt7915_update_channel(struct mt76_phy *mphy);
 int mt7915_init_debugfs(struct mt7915_dev *dev);
 #ifdef CONFIG_MAC80211_DEBUGFS
 void mt7915_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
index 643f171..340b364 100644
@@ -94,11 +94,15 @@ mt7915_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
 }
 
 /* TODO: support 2/4/6/8 MSI-X vectors */
-static irqreturn_t mt7915_irq_handler(int irq, void *dev_instance)
+static void mt7915_irq_tasklet(struct tasklet_struct *t)
 {
-       struct mt7915_dev *dev = dev_instance;
+       struct mt7915_dev *dev = from_tasklet(dev, t, irq_tasklet);
        u32 intr, intr1, mask;
 
+       mt76_wr(dev, MT_INT_MASK_CSR, 0);
+       if (dev->hif2)
+               mt76_wr(dev, MT_INT1_MASK_CSR, 0);
+
        intr = mt76_rr(dev, MT_INT_SOURCE_CSR);
        intr &= dev->mt76.mmio.irqmask;
        mt76_wr(dev, MT_INT_SOURCE_CSR, intr);
@@ -111,9 +115,6 @@ static irqreturn_t mt7915_irq_handler(int irq, void *dev_instance)
                intr |= intr1;
        }
 
-       if (!test_bit(MT76_STATE_INITIALIZED, &dev->mphy.state))
-               return IRQ_NONE;
-
        trace_dev_irq(&dev->mt76, intr, dev->mt76.mmio.irqmask);
 
        mask = intr & MT_INT_RX_DONE_ALL;
@@ -150,6 +151,20 @@ static irqreturn_t mt7915_irq_handler(int irq, void *dev_instance)
                        wake_up(&dev->reset_wait);
                }
        }
+}
+
+static irqreturn_t mt7915_irq_handler(int irq, void *dev_instance)
+{
+       struct mt7915_dev *dev = dev_instance;
+
+       mt76_wr(dev, MT_INT_MASK_CSR, 0);
+       if (dev->hif2)
+               mt76_wr(dev, MT_INT1_MASK_CSR, 0);
+
+       if (!test_bit(MT76_STATE_INITIALIZED, &dev->mphy.state))
+               return IRQ_NONE;
+
+       tasklet_schedule(&dev->irq_tasklet);
 
        return IRQ_HANDLED;
 }
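Net effect of the hard-IRQ/tasklet split, as a summary (not code from the patch; the final unmask path sits outside these hunks):

	/*
	 * hard IRQ : write 0 to MT_INT_MASK_CSR (and MT_INT1_MASK_CSR
	 *            with a second hif), then tasklet_schedule()
	 * tasklet  : read and ack MT_INT_SOURCE_CSR, kick NAPI/MCU work
	 * re-arm   : mt7915_irq_enable() now only updates the software
	 *            irqmask and reschedules the tasklet; the register
	 *            write is deferred to tasklet context
	 */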
@@ -240,6 +255,8 @@ static int mt7915_pci_probe(struct pci_dev *pdev,
        if (ret)
                return ret;
 
+       mt76_pci_disable_aspm(pdev);
+
        if (id->device == 0x7916)
                return mt7915_pci_hif2_probe(pdev);
 
@@ -250,10 +267,18 @@ static int mt7915_pci_probe(struct pci_dev *pdev,
 
        dev = container_of(mdev, struct mt7915_dev, mt76);
 
+       ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
+       if (ret < 0)
+               goto free;
+
        ret = mt7915_mmio_init(mdev, pcim_iomap_table(pdev)[0], pdev->irq);
        if (ret)
                goto error;
 
+       tasklet_setup(&dev->irq_tasklet, mt7915_irq_tasklet);
+
+       mt76_wr(dev, MT_INT_MASK_CSR, 0);
+
        /* master switch of PCIe interrupt enable */
        mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);
 
@@ -266,10 +291,14 @@ static int mt7915_pci_probe(struct pci_dev *pdev,
 
        ret = mt7915_register_device(dev);
        if (ret)
-               goto error;
+               goto free_irq;
 
        return 0;
+free_irq:
+       devm_free_irq(mdev->dev, pdev->irq, dev);
 error:
+       pci_free_irq_vectors(pdev);
+free:
        mt76_free_device(&dev->mt76);
 
        return ret;
index efe0f29..a213b5c 100644
 #define MT_TMAC_CTCR0_INS_DDLMT_EN             BIT(17)
 #define MT_TMAC_CTCR0_INS_DDLMT_VHT_SMPDU_EN   BIT(18)
 
-#define MT_TMAC_FP0R0(_band)           MT_WF_TMAC(_band, 0x020)
-#define MT_TMAC_FP0R15(_band)          MT_WF_TMAC(_band, 0x080)
-#define MT_TMAC_FP0R18(_band)          MT_WF_TMAC(_band, 0x270)
-#define MT_TMAC_FP_MASK                        GENMASK(7, 0)
-
 #define MT_TMAC_TFCR0(_band)           MT_WF_TMAC(_band, 0x1e0)
 
 #define MT_WF_DMA_BASE(_band)          ((_band) ? 0xa1e00 : 0x21e00)
 #define MT_ETBF_TX_FB_CPL              GENMASK(31, 16)
 #define MT_ETBF_TX_FB_TRI              GENMASK(15, 0)
 
+#define MT_ETBF_RX_FB_CONT(_band)      MT_WF_ETBF(_band, 0x068)
+#define MT_ETBF_RX_FB_BW               GENMASK(7, 6)
+#define MT_ETBF_RX_FB_NC               GENMASK(5, 3)
+#define MT_ETBF_RX_FB_NR               GENMASK(2, 0)
+
 #define MT_ETBF_TX_APP_CNT(_band)      MT_WF_ETBF(_band, 0x0f0)
 #define MT_ETBF_TX_IBF_CNT             GENMASK(31, 16)
 #define MT_ETBF_TX_EBF_CNT             GENMASK(15, 0)
 #define MT_LPON_TCR(_band, n)          MT_WF_LPON(_band, 0x0a8 + (n) * 4)
 #define MT_LPON_TCR_SW_MODE            GENMASK(1, 0)
 #define MT_LPON_TCR_SW_WRITE           BIT(0)
+#define MT_LPON_TCR_SW_ADJUST          BIT(1)
+#define MT_LPON_TCR_SW_READ            GENMASK(1, 0)
 
 /* MIB: band 0(0x24800), band 1(0xa4800) */
 #define MT_WF_MIB_BASE(_band)          ((_band) ? 0xa4800 : 0x24800)
 #define MT_MIB_SDR3(_band)             MT_WF_MIB(_band, 0x014)
 #define MT_MIB_SDR3_FCS_ERR_MASK       GENMASK(15, 0)
 
-#define MT_MIB_SDR9(_band)             MT_WF_MIB(_band, 0x02c)
-#define MT_MIB_SDR9_BUSY_MASK          GENMASK(23, 0)
-
-#define MT_MIB_SDR16(_band)            MT_WF_MIB(_band, 0x048)
-#define MT_MIB_SDR16_BUSY_MASK         GENMASK(23, 0)
-
 #define MT_MIB_SDR34(_band)            MT_WF_MIB(_band, 0x090)
 #define MT_MIB_MU_BF_TX_CNT            GENMASK(15, 0)
 
-#define MT_MIB_SDR36(_band)            MT_WF_MIB(_band, 0x098)
-#define MT_MIB_SDR36_TXTIME_MASK       GENMASK(23, 0)
-#define MT_MIB_SDR37(_band)            MT_WF_MIB(_band, 0x09c)
-#define MT_MIB_SDR37_RXTIME_MASK       GENMASK(23, 0)
-
 #define MT_MIB_DR8(_band)              MT_WF_MIB(_band, 0x0c0)
 #define MT_MIB_DR9(_band)              MT_WF_MIB(_band, 0x0c4)
 #define MT_MIB_DR11(_band)             MT_WF_MIB(_band, 0x0cc)
 #define MT_MIB_BA_MISS_COUNT_MASK      GENMASK(15, 0)
 #define MT_MIB_ACK_FAIL_COUNT_MASK     GENMASK(31, 16)
 
-#define MT_MIB_MB_SDR2(_band, n)       MT_WF_MIB(_band, 0x108 + ((n) << 4))
-#define MT_MIB_FRAME_RETRIES_COUNT_MASK        GENMASK(15, 0)
-
 #define MT_TX_AGG_CNT(_band, n)                MT_WF_MIB(_band, 0x0a8 + ((n) << 2))
 #define MT_TX_AGG_CNT2(_band, n)       MT_WF_MIB(_band, 0x164 + ((n) << 2))
 #define MT_MIB_ARNG(_band, n)          MT_WF_MIB(_band, 0x4b8 + ((n) << 2))
 #define MT_WF_RFCR1_DROP_CFEND         BIT(7)
 #define MT_WF_RFCR1_DROP_CFACK         BIT(8)
 
-#define MT_WF_RMAC_MIB_TIME0(_band)    MT_WF_RMAC(_band, 0x03c4)
+#define MT_WF_RMAC_MIB_AIRTIME0(_band) MT_WF_RMAC(_band, 0x0380)
 #define MT_WF_RMAC_MIB_RXTIME_CLR      BIT(31)
 #define MT_WF_RMAC_MIB_RXTIME_EN       BIT(30)
 
-#define MT_WF_RMAC_MIB_AIRTIME14(_band)        MT_WF_RMAC(_band, 0x03b8)
-#define MT_MIB_OBSSTIME_MASK           GENMASK(23, 0)
-#define MT_WF_RMAC_MIB_AIRTIME0(_band) MT_WF_RMAC(_band, 0x0380)
-
 /* WFDMA0 */
 #define MT_WFDMA0_BASE                 0xd4000
 #define MT_WFDMA0(ofs)                 (MT_WFDMA0_BASE + (ofs))
index f9d81e3..b220b33 100644
@@ -464,10 +464,17 @@ mt7915_tm_set_tx_frames(struct mt7915_phy *phy, bool en)
 static void
 mt7915_tm_set_rx_frames(struct mt7915_phy *phy, bool en)
 {
-       if (en)
+       mt7915_tm_set_trx(phy, TM_MAC_RX_RXV, false);
+
+       if (en) {
+               struct mt7915_dev *dev = phy->dev;
+
                mt7915_tm_update_channel(phy);
 
-       mt7915_tm_set_trx(phy, TM_MAC_RX_RXV, en);
+               /* read-clear */
+               mt76_rr(dev, MT_MIB_SDR3(phy != &dev->phy));
+               mt7915_tm_set_trx(phy, TM_MAC_RX_RXV, en);
+       }
 }
 
 static int
@@ -690,7 +697,11 @@ static int
 mt7915_tm_dump_stats(struct mt76_phy *mphy, struct sk_buff *msg)
 {
        struct mt7915_phy *phy = mphy->priv;
+       struct mt7915_dev *dev = phy->dev;
+       bool ext_phy = phy != &dev->phy;
+       enum mt76_rxq_id q;
        void *rx, *rssi;
+       u16 fcs_err;
        int i;
 
        rx = nla_nest_start(msg, MT76_TM_STATS_ATTR_LAST_RX);
@@ -735,6 +746,12 @@ mt7915_tm_dump_stats(struct mt76_phy *mphy, struct sk_buff *msg)
 
        nla_nest_end(msg, rx);
 
+       fcs_err = mt76_get_field(dev, MT_MIB_SDR3(ext_phy),
+                                MT_MIB_SDR3_FCS_ERR_MASK);
+       q = ext_phy ? MT_RXQ_EXT : MT_RXQ_MAIN;
+       mphy->test.rx_stats.packets[q] += fcs_err;
+       mphy->test.rx_stats.fcs_error[q] += fcs_err;
+
        return 0;
 }
 
index 8f8533e..397a6b5 100644
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+/* SPDX-License-Identifier: ISC */
 /* Copyright (C) 2020 MediaTek Inc. */
 
 #ifndef __MT7915_TESTMODE_H
index e531666..0ebb599 100644
@@ -1,4 +1,4 @@
-#SPDX-License-Identifier: ISC
+# SPDX-License-Identifier: ISC
 
 obj-$(CONFIG_MT7921E) += mt7921e.o
 
index 6ee423d..77468bd 100644
@@ -184,7 +184,10 @@ mt7921_txpwr(struct seq_file *s, void *data)
        struct mt7921_txpwr txpwr;
        int ret;
 
+       mt7921_mutex_acquire(dev);
        ret = mt7921_get_txpwr_info(dev, &txpwr);
+       mt7921_mutex_release(dev);
+
        if (ret)
                return ret;
 
@@ -247,6 +250,9 @@ mt7921_pm_set(void *data, u64 val)
        ieee80211_iterate_active_interfaces(mphy->hw,
                                            IEEE80211_IFACE_ITER_RESUME_ALL,
                                            mt7921_pm_interface_iter, mphy->priv);
+
+       mt76_connac_mcu_set_deep_sleep(&dev->mt76, pm->ds_enable);
+
        mt7921_mutex_release(dev);
 
        return 0;
@@ -264,6 +270,36 @@ mt7921_pm_get(void *data, u64 *val)
 
 DEFINE_DEBUGFS_ATTRIBUTE(fops_pm, mt7921_pm_get, mt7921_pm_set, "%lld\n");
 
+static int
+mt7921_deep_sleep_set(void *data, u64 val)
+{
+       struct mt7921_dev *dev = data;
+       struct mt76_connac_pm *pm = &dev->pm;
+       bool enable = !!val;
+
+       mt7921_mutex_acquire(dev);
+       if (pm->ds_enable != enable) {
+               mt76_connac_mcu_set_deep_sleep(&dev->mt76, enable);
+               pm->ds_enable = enable;
+       }
+       mt7921_mutex_release(dev);
+
+       return 0;
+}
+
+static int
+mt7921_deep_sleep_get(void *data, u64 *val)
+{
+       struct mt7921_dev *dev = data;
+
+       *val = dev->pm.ds_enable;
+
+       return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_ds, mt7921_deep_sleep_get,
+                        mt7921_deep_sleep_set, "%lld\n");
+
 static int
 mt7921_pm_stats(struct seq_file *s, void *data)
 {
@@ -355,6 +391,7 @@ int mt7921_init_debugfs(struct mt7921_dev *dev)
        debugfs_create_file("chip_reset", 0600, dir, dev, &fops_reset);
        debugfs_create_devm_seqfile(dev->mt76.dev, "runtime_pm_stats", dir,
                                    mt7921_pm_stats);
+       debugfs_create_file("deep-sleep", 0600, dir, dev, &fops_ds);
 
        return 0;
 }
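Userspace usage of the new knob, for reference (the exact debugfs path is an assumption and depends on the phy name):

	/*
	 *   echo 0 > /sys/kernel/debug/ieee80211/phy0/mt76/deep-sleep
	 *   cat /sys/kernel/debug/ieee80211/phy0/mt76/deep-sleep
	 */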
index 71e664e..7d7d43a 100644
@@ -74,7 +74,7 @@ static int mt7921_poll_tx(struct napi_struct *napi, int budget)
        mt7921_tx_cleanup(dev);
        if (napi_complete(napi))
                mt7921_irq_enable(dev, MT_INT_TX_DONE_ALL);
-       mt76_connac_pm_unref(&dev->pm);
+       mt76_connac_pm_unref(&dev->mphy, &dev->pm);
 
        return 0;
 }
@@ -92,7 +92,7 @@ static int mt7921_poll_rx(struct napi_struct *napi, int budget)
                return 0;
        }
        done = mt76_dma_rx_poll(napi, budget);
-       mt76_connac_pm_unref(&dev->pm);
+       mt76_connac_pm_unref(&dev->mphy, &dev->pm);
 
        return done;
 }
@@ -313,9 +313,9 @@ static int mt7921_dma_reset(struct mt7921_dev *dev, bool force)
 
 int mt7921_wfsys_reset(struct mt7921_dev *dev)
 {
-       mt76_set(dev, 0x70002600, BIT(0));
-       msleep(200);
-       mt76_clear(dev, 0x70002600, BIT(0));
+       mt76_clear(dev, MT_WFSYS_SW_RST_B, WFSYS_SW_RST_B);
+       msleep(50);
+       mt76_set(dev, MT_WFSYS_SW_RST_B, WFSYS_SW_RST_B);
 
        if (!__mt76_poll_msec(&dev->mt76, MT_WFSYS_SW_RST_B,
                              WFSYS_SW_INIT_DONE, WFSYS_SW_INIT_DONE, 500))
@@ -380,9 +380,7 @@ int mt7921_wpdma_reinit_cond(struct mt7921_dev *dev)
 
 int mt7921_dma_init(struct mt7921_dev *dev)
 {
-       /* Increase buffer size to receive large VHT/HE MPDUs */
        struct mt76_bus_ops *bus_ops;
-       int rx_buf_size = MT_RX_BUF_SIZE * 2;
        int ret;
 
        dev->bus_ops = dev->mt76.bus;
@@ -402,6 +400,10 @@ int mt7921_dma_init(struct mt7921_dev *dev)
        if (ret)
                return ret;
 
+       ret = mt7921_wfsys_reset(dev);
+       if (ret)
+               return ret;
+
        /* init tx queue */
        ret = mt7921_init_tx_queues(&dev->phy, MT7921_TXQ_BAND0,
                                    MT7921_TX_RING_SIZE);
@@ -426,7 +428,7 @@ int mt7921_dma_init(struct mt7921_dev *dev)
        ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU],
                               MT7921_RXQ_MCU_WM,
                               MT7921_RX_MCU_RING_SIZE,
-                              rx_buf_size, MT_RX_EVENT_RING_BASE);
+                              MT_RX_BUF_SIZE, MT_RX_EVENT_RING_BASE);
        if (ret)
                return ret;
 
@@ -434,14 +436,14 @@ int mt7921_dma_init(struct mt7921_dev *dev)
        ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU_WA],
                               MT7921_RXQ_MCU_WM,
                               MT7921_RX_MCU_RING_SIZE,
-                              rx_buf_size, MT_WFDMA0(0x540));
+                              MT_RX_BUF_SIZE, MT_WFDMA0(0x540));
        if (ret)
                return ret;
 
        /* rx data */
        ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN],
                               MT7921_RXQ_BAND0, MT7921_RX_RING_SIZE,
-                              rx_buf_size, MT_RX_DATA_RING_BASE);
+                              MT_RX_BUF_SIZE, MT_RX_DATA_RING_BASE);
        if (ret)
                return ret;
 
index 1763ea0..a9ce10b 100644
@@ -7,34 +7,6 @@
 #include "mcu.h"
 #include "eeprom.h"
 
-#define CCK_RATE(_idx, _rate) {                                                \
-       .bitrate = _rate,                                               \
-       .flags = IEEE80211_RATE_SHORT_PREAMBLE,                         \
-       .hw_value = (MT_PHY_TYPE_CCK << 8) | (_idx),                    \
-       .hw_value_short = (MT_PHY_TYPE_CCK << 8) | (4 + (_idx)),        \
-}
-
-#define OFDM_RATE(_idx, _rate) {                                       \
-       .bitrate = _rate,                                               \
-       .hw_value = (MT_PHY_TYPE_OFDM << 8) | (_idx),                   \
-       .hw_value_short = (MT_PHY_TYPE_OFDM << 8) | (_idx),             \
-}
-
-static struct ieee80211_rate mt7921_rates[] = {
-       CCK_RATE(0, 10),
-       CCK_RATE(1, 20),
-       CCK_RATE(2, 55),
-       CCK_RATE(3, 110),
-       OFDM_RATE(11, 60),
-       OFDM_RATE(15, 90),
-       OFDM_RATE(10, 120),
-       OFDM_RATE(14, 180),
-       OFDM_RATE(9,  240),
-       OFDM_RATE(13, 360),
-       OFDM_RATE(8,  480),
-       OFDM_RATE(12, 540),
-};
-
 static const struct ieee80211_iface_limit if_limits[] = {
        {
                .max = MT7921_MAX_INTERFACES,
@@ -73,11 +45,13 @@ static void
 mt7921_init_wiphy(struct ieee80211_hw *hw)
 {
        struct mt7921_phy *phy = mt7921_hw_phy(hw);
+       struct mt7921_dev *dev = phy->dev;
        struct wiphy *wiphy = hw->wiphy;
 
        hw->queues = 4;
        hw->max_rx_aggregation_subframes = 64;
        hw->max_tx_aggregation_subframes = 128;
+       hw->netdev_features = NETIF_F_RXCSUM;
 
        hw->radiotap_timestamp.units_pos =
                IEEE80211_RADIOTAP_TIMESTAMP_UNIT_US;
@@ -88,11 +62,13 @@ mt7921_init_wiphy(struct ieee80211_hw *hw)
        hw->vif_data_size = sizeof(struct mt7921_vif);
 
        wiphy->iface_combinations = if_comb;
+       wiphy->flags &= ~WIPHY_FLAG_IBSS_RSN;
+       wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
        wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
        wiphy->max_scan_ie_len = MT76_CONNAC_SCAN_IE_LEN;
        wiphy->max_scan_ssids = 4;
        wiphy->max_sched_scan_plan_interval =
-               MT76_CONNAC_MAX_SCHED_SCAN_INTERVAL;
+               MT76_CONNAC_MAX_TIME_SCHED_SCAN_INTERVAL;
        wiphy->max_sched_scan_ie_len = IEEE80211_MAX_DATA_LEN;
        wiphy->max_sched_scan_ssids = MT76_CONNAC_MAX_SCHED_SCAN_SSID;
        wiphy->max_match_sets = MT76_CONNAC_MAX_SCAN_MATCH;
@@ -100,46 +76,33 @@ mt7921_init_wiphy(struct ieee80211_hw *hw)
        wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
        wiphy->reg_notifier = mt7921_regd_notifier;
 
-       wiphy->features |= NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR;
+       wiphy->features |= NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR |
+                          NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR;
        wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_SET_SCAN_DWELL);
 
        ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS);
        ieee80211_hw_set(hw, HAS_RATE_CONTROL);
        ieee80211_hw_set(hw, SUPPORTS_TX_ENCAP_OFFLOAD);
+       ieee80211_hw_set(hw, SUPPORTS_RX_DECAP_OFFLOAD);
        ieee80211_hw_set(hw, WANT_MONITOR_VIF);
        ieee80211_hw_set(hw, SUPPORTS_PS);
        ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
 
+       if (dev->pm.enable)
+               ieee80211_hw_set(hw, CONNECTION_MONITOR);
+
        hw->max_tx_fragments = 4;
 }
 
 static void
 mt7921_mac_init_band(struct mt7921_dev *dev, u8 band)
 {
-       u32 mask, set;
-
        mt76_rmw_field(dev, MT_TMAC_CTCR0(band),
                       MT_TMAC_CTCR0_INS_DDLMT_REFTIME, 0x3f);
        mt76_set(dev, MT_TMAC_CTCR0(band),
                 MT_TMAC_CTCR0_INS_DDLMT_VHT_SMPDU_EN |
                 MT_TMAC_CTCR0_INS_DDLMT_EN);
 
-       mask = MT_MDP_RCFR0_MCU_RX_MGMT |
-              MT_MDP_RCFR0_MCU_RX_CTL_NON_BAR |
-              MT_MDP_RCFR0_MCU_RX_CTL_BAR;
-       set = FIELD_PREP(MT_MDP_RCFR0_MCU_RX_MGMT, MT_MDP_TO_HIF) |
-             FIELD_PREP(MT_MDP_RCFR0_MCU_RX_CTL_NON_BAR, MT_MDP_TO_HIF) |
-             FIELD_PREP(MT_MDP_RCFR0_MCU_RX_CTL_BAR, MT_MDP_TO_HIF);
-       mt76_rmw(dev, MT_MDP_BNRCFR0(band), mask, set);
-
-       mask = MT_MDP_RCFR1_MCU_RX_BYPASS |
-              MT_MDP_RCFR1_RX_DROPPED_UCAST |
-              MT_MDP_RCFR1_RX_DROPPED_MCAST;
-       set = FIELD_PREP(MT_MDP_RCFR1_MCU_RX_BYPASS, MT_MDP_TO_HIF) |
-             FIELD_PREP(MT_MDP_RCFR1_RX_DROPPED_UCAST, MT_MDP_TO_HIF) |
-             FIELD_PREP(MT_MDP_RCFR1_RX_DROPPED_MCAST, MT_MDP_TO_HIF);
-       mt76_rmw(dev, MT_MDP_BNRCFR1(band), mask, set);
-
        mt76_set(dev, MT_WF_RMAC_MIB_TIME0(band), MT_WF_RMAC_MIB_RXTIME_EN);
        mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0(band), MT_WF_RMAC_MIB_RXTIME_EN);
 
@@ -148,14 +111,15 @@ mt7921_mac_init_band(struct mt7921_dev *dev, u8 band)
        mt76_clear(dev, MT_DMA_DCR0(band), MT_DMA_DCR0_RXD_G5_EN);
 }
 
-void mt7921_mac_init(struct mt7921_dev *dev)
+int mt7921_mac_init(struct mt7921_dev *dev)
 {
        int i;
 
        mt76_rmw_field(dev, MT_MDP_DCR1, MT_MDP_DCR1_MAX_RX_LEN, 1536);
-       /* disable hardware de-agg */
-       mt76_clear(dev, MT_MDP_DCR0, MT_MDP_DCR0_DAMSDU_EN);
-       mt76_clear(dev, MT_MDP_DCR0, MT_MDP_DCR0_RX_HDR_TRANS_EN);
+       /* enable hardware de-agg */
+       mt76_set(dev, MT_MDP_DCR0, MT_MDP_DCR0_DAMSDU_EN);
+       /* enable hardware rx header translation */
+       mt76_set(dev, MT_MDP_DCR0, MT_MDP_DCR0_RX_HDR_TRANS_EN);
 
        for (i = 0; i < MT7921_WTBL_SIZE; i++)
                mt7921_mac_wtbl_update(dev, i,
@@ -163,7 +127,7 @@ void mt7921_mac_init(struct mt7921_dev *dev)
        for (i = 0; i < 2; i++)
                mt7921_mac_init_band(dev, i);
 
-       mt76_connac_mcu_set_rts_thresh(&dev->mt76, 0x92b, 0);
+       return mt76_connac_mcu_set_rts_thresh(&dev->mt76, 0x92b, 0);
 }
 
 static int mt7921_init_hardware(struct mt7921_dev *dev)
@@ -203,9 +167,7 @@ static int mt7921_init_hardware(struct mt7921_dev *dev)
        dev->mt76.global_wcid.tx_info |= MT_WCID_TX_INFO_SET;
        rcu_assign_pointer(dev->mt76.wcid[idx], &dev->mt76.global_wcid);
 
-       mt7921_mac_init(dev);
-
-       return 0;
+       return mt7921_mac_init(dev);
 }
 
 int mt7921_register_device(struct mt7921_dev *dev)
@@ -224,7 +186,6 @@ int mt7921_register_device(struct mt7921_dev *dev)
        mutex_init(&dev->pm.mutex);
        init_waitqueue_head(&dev->pm.wait);
        spin_lock_init(&dev->pm.txq_lock);
-       set_bit(MT76_STATE_PM, &dev->mphy.state);
        INIT_LIST_HEAD(&dev->phy.stats_list);
        INIT_DELAYED_WORK(&dev->mphy.mac_work, mt7921_mac_work);
        INIT_DELAYED_WORK(&dev->phy.scan_work, mt7921_scan_work);
@@ -239,6 +200,8 @@ int mt7921_register_device(struct mt7921_dev *dev)
        dev->pm.idle_timeout = MT7921_PM_TIMEOUT;
        dev->pm.stats.last_wake_event = jiffies;
        dev->pm.stats.last_doze_event = jiffies;
+       dev->pm.enable = true;
+       dev->pm.ds_enable = true;
 
        ret = mt7921_init_hardware(dev);
        if (ret)
@@ -253,19 +216,33 @@ int mt7921_register_device(struct mt7921_dev *dev)
                        IEEE80211_HT_CAP_MAX_AMSDU;
        dev->mphy.sband_5g.sband.vht_cap.cap |=
                        IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991 |
-                       IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
+                       IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK |
+                       IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
+                       IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE |
+                       (3 << IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT);
+
        dev->mphy.hw->wiphy->available_antennas_rx = dev->mphy.chainmask;
        dev->mphy.hw->wiphy->available_antennas_tx = dev->mphy.chainmask;
 
        mt76_set_stream_caps(&dev->mphy, true);
        mt7921_set_stream_he_caps(&dev->phy);
 
-       ret = mt76_register_device(&dev->mt76, true, mt7921_rates,
-                                  ARRAY_SIZE(mt7921_rates));
+       ret = mt76_register_device(&dev->mt76, true, mt76_rates,
+                                  ARRAY_SIZE(mt76_rates));
+       if (ret)
+               return ret;
+
+       ret = mt7921_init_debugfs(dev);
        if (ret)
                return ret;
 
-       return mt7921_init_debugfs(dev);
+       ret = mt76_connac_mcu_set_deep_sleep(&dev->mt76, dev->pm.ds_enable);
+       if (ret)
+               return ret;
+
+       dev->hw_init_done = true;
+
+       return 0;
 }
 
 void mt7921_unregister_device(struct mt7921_dev *dev)
index decf2d5..7fe2e3a 100644
@@ -308,21 +308,24 @@ mt7921_mac_assoc_rssi(struct mt7921_dev *dev, struct sk_buff *skb)
 
 int mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
 {
+       u32 csum_mask = MT_RXD0_NORMAL_IP_SUM | MT_RXD0_NORMAL_UDP_TCP_SUM;
        struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
+       bool hdr_trans, unicast, insert_ccmp_hdr = false;
+       u8 chfreq, qos_ctl = 0, remove_pad, amsdu_info;
+       __le32 *rxv = NULL, *rxd = (__le32 *)skb->data;
        struct mt76_phy *mphy = &dev->mt76.phy;
        struct mt7921_phy *phy = &dev->phy;
        struct ieee80211_supported_band *sband;
        struct ieee80211_hdr *hdr;
-       __le32 *rxd = (__le32 *)skb->data;
-       __le32 *rxv = NULL;
-       u32 mode = 0;
+       u32 rxd0 = le32_to_cpu(rxd[0]);
        u32 rxd1 = le32_to_cpu(rxd[1]);
        u32 rxd2 = le32_to_cpu(rxd[2]);
        u32 rxd3 = le32_to_cpu(rxd[3]);
-       bool unicast, insert_ccmp_hdr = false;
-       u8 remove_pad;
+       u32 rxd4 = le32_to_cpu(rxd[4]);
+       u16 seq_ctrl = 0;
+       __le16 fc = 0;
+       u32 mode = 0;
        int i, idx;
-       u8 chfreq;
 
        memset(status, 0, sizeof(*status));
 
@@ -332,9 +335,13 @@ int mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
        if (!test_bit(MT76_STATE_RUNNING, &mphy->state))
                return -EINVAL;
 
+       if (rxd2 & MT_RXD2_NORMAL_AMSDU_ERR)
+               return -EINVAL;
+
        chfreq = FIELD_GET(MT_RXD3_NORMAL_CH_FREQ, rxd3);
        unicast = FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE, rxd3) == MT_RXD3_NORMAL_U2M;
        idx = FIELD_GET(MT_RXD1_NORMAL_WLAN_IDX, rxd1);
+       hdr_trans = rxd2 & MT_RXD2_NORMAL_HDR_TRANS;
        status->wcid = mt7921_rx_get_wcid(dev, idx, unicast);
 
        if (status->wcid) {
@@ -357,6 +364,9 @@ int mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
        if (!sband->channels)
                return -EINVAL;
 
+       if ((rxd0 & csum_mask) == csum_mask)
+               skb->ip_summed = CHECKSUM_UNNECESSARY;
+
        if (rxd1 & MT_RXD1_NORMAL_FCS_ERR)
                status->flag |= RX_FLAG_FAILED_FCS_CRC;
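
The new csum_mask test above accepts a frame only when both RXD0 status bits are set at once: the hardware validated the IP header checksum and the TCP/UDP checksum. A standalone sketch of the both-bits test (bit positions are illustrative, not the real RXD layout):

#include <stdbool.h>
#include <stdint.h>

#define RXD0_IP_SUM      (1u << 4)      /* illustrative positions */
#define RXD0_UDP_TCP_SUM (1u << 5)

static bool rx_csum_valid(uint32_t rxd0)
{
        const uint32_t mask = RXD0_IP_SUM | RXD0_UDP_TCP_SUM;

        /* (rxd0 & mask) == mask demands both bits, not just one */
        return (rxd0 & mask) == mask;
}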
 
@@ -377,6 +387,13 @@ int mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
 
        rxd += 6;
        if (rxd1 & MT_RXD1_NORMAL_GROUP_4) {
+               u32 v0 = le32_to_cpu(rxd[0]);
+               u32 v2 = le32_to_cpu(rxd[2]);
+
+               fc = cpu_to_le16(FIELD_GET(MT_RXD6_FRAME_CONTROL, v0));
+               seq_ctrl = FIELD_GET(MT_RXD8_SEQ_CTRL, v2);
+               qos_ctl = FIELD_GET(MT_RXD8_QOS_CTL, v2);
+
                rxd += 4;
                if ((u8 *)rxd - skb->data >= skb->len)
                        return -EINVAL;
@@ -386,14 +403,27 @@ int mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
                u8 *data = (u8 *)rxd;
 
                if (status->flag & RX_FLAG_DECRYPTED) {
-                       status->iv[0] = data[5];
-                       status->iv[1] = data[4];
-                       status->iv[2] = data[3];
-                       status->iv[3] = data[2];
-                       status->iv[4] = data[1];
-                       status->iv[5] = data[0];
-
-                       insert_ccmp_hdr = FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
+                       switch (FIELD_GET(MT_RXD1_NORMAL_SEC_MODE, rxd1)) {
+                       case MT_CIPHER_AES_CCMP:
+                       case MT_CIPHER_CCMP_CCX:
+                       case MT_CIPHER_CCMP_256:
+                               insert_ccmp_hdr =
+                                       FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
+                               fallthrough;
+                       case MT_CIPHER_TKIP:
+                       case MT_CIPHER_TKIP_NO_MIC:
+                       case MT_CIPHER_GCMP:
+                       case MT_CIPHER_GCMP_256:
+                               status->iv[0] = data[5];
+                               status->iv[1] = data[4];
+                               status->iv[2] = data[3];
+                               status->iv[3] = data[2];
+                               status->iv[4] = data[1];
+                               status->iv[5] = data[0];
+                               break;
+                       default:
+                               break;
+                       }
                }
                rxd += 4;
                if ((u8 *)rxd - skb->data >= skb->len)
@@ -444,16 +474,19 @@ int mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
                status->chain_signal[1] = to_rssi(MT_PRXV_RCPI1, v1);
                status->chain_signal[2] = to_rssi(MT_PRXV_RCPI2, v1);
                status->chain_signal[3] = to_rssi(MT_PRXV_RCPI3, v1);
-               status->signal = status->chain_signal[0];
-
-               for (i = 1; i < hweight8(mphy->antenna_mask); i++) {
-                       if (!(status->chains & BIT(i)))
+               status->signal = -128;
+               for (i = 0; i < hweight8(mphy->antenna_mask); i++) {
+                       if (!(status->chains & BIT(i)) ||
+                           status->chain_signal[i] >= 0)
                                continue;
 
                        status->signal = max(status->signal,
                                             status->chain_signal[i]);
                }
 
+               if (status->signal == -128)
+                       status->flag |= RX_FLAG_NO_SIGNAL_VAL;
+
                stbc = FIELD_GET(MT_PRXV_STBC, v0);
                gi = FIELD_GET(MT_PRXV_SGI, v0);
                cck = false;
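
The rewritten signal aggregation starts from -128, considers only chains that are both present in the antenna mask and reporting a plausible negative dBm value, and keeps the strongest one; when nothing qualifies, RX_FLAG_NO_SIGNAL_VAL tells mac80211 there is no usable measurement. The same fold as a self-contained worked example:

#include <stdint.h>
#include <stdio.h>

static int8_t combine_rssi(const int8_t *chain, unsigned int nchains,
                           unsigned int chain_mask)
{
        int8_t signal = -128;   /* "no usable measurement" floor */
        unsigned int i;

        for (i = 0; i < nchains; i++) {
                if (!(chain_mask & (1u << i)) || chain[i] >= 0)
                        continue;       /* absent chain or bogus reading */
                if (chain[i] > signal)
                        signal = chain[i];
        }
        return signal;
}

int main(void)
{
        int8_t chains[4] = { -45, -60, 0, -128 };

        /* chain 2 reports a non-negative value and is skipped: -45 */
        printf("%d\n", combine_rssi(chains, 4, 0xf));
        return 0;
}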
@@ -540,10 +573,35 @@ int mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
 
        skb_pull(skb, (u8 *)rxd - skb->data + 2 * remove_pad);
 
-       if (insert_ccmp_hdr) {
-               u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1);
+       amsdu_info = FIELD_GET(MT_RXD4_NORMAL_PAYLOAD_FORMAT, rxd4);
+       status->amsdu = !!amsdu_info;
+       if (status->amsdu) {
+               status->first_amsdu = amsdu_info == MT_RXD4_FIRST_AMSDU_FRAME;
+               status->last_amsdu = amsdu_info == MT_RXD4_LAST_AMSDU_FRAME;
+               if (!hdr_trans) {
+                       memmove(skb->data + 2, skb->data,
+                               ieee80211_get_hdrlen_from_skb(skb));
+                       skb_pull(skb, 2);
+               }
+       }
+
+       if (!hdr_trans) {
+               if (insert_ccmp_hdr) {
+                       u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1);
+
+                       mt76_insert_ccmp_hdr(skb, key_id);
+               }
 
-               mt76_insert_ccmp_hdr(skb, key_id);
+               hdr = mt76_skb_get_hdr(skb);
+               fc = hdr->frame_control;
+               if (ieee80211_is_data_qos(fc)) {
+                       seq_ctrl = le16_to_cpu(hdr->seq_ctrl);
+                       qos_ctl = *ieee80211_get_qos_ctl(hdr);
+               }
+       } else {
+               status->flag &= ~(RX_FLAG_RADIOTAP_HE |
+                                 RX_FLAG_RADIOTAP_HE_MU);
+               status->flag |= RX_FLAG_8023;
        }
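
With hardware header translation the 802.11 header has already been rewritten to 802.3 by the time the host sees the frame, so the fields this function still needs (frame control, sequence control, QoS control) are taken from the RXD group-4 words instead, and RX_FLAG_8023 stops mac80211 from parsing a header that is no longer there. A sketch of the two-source selection (struct layout is illustrative):

#include <stdbool.h>
#include <stdint.h>

struct rx_meta { uint16_t fc, seq_ctrl; uint8_t qos_ctl; };
struct rx_desc { uint16_t fc, seq_ctrl; uint8_t qos_ctl; };

static void fill_rx_meta(struct rx_meta *m, const struct rx_desc *d,
                         const uint8_t *frame, bool hdr_trans)
{
        if (hdr_trans) {
                /* 802.11 header is gone: trust the descriptor copy */
                m->fc = d->fc;
                m->seq_ctrl = d->seq_ctrl;
                m->qos_ctl = d->qos_ctl;
        } else {
                /* header still present: read the frame itself (QoS data:
                 * fc at 0, seq ctrl at 22, qos ctl at 24, little-endian) */
                m->fc = frame[0] | (uint16_t)frame[1] << 8;
                m->seq_ctrl = frame[22] | (uint16_t)frame[23] << 8;
                m->qos_ctl = frame[24];
        }
}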
 
        mt7921_mac_assoc_rssi(dev, skb);
@@ -551,14 +609,12 @@ int mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
        if (rxv && status->flag & RX_FLAG_RADIOTAP_HE)
                mt7921_mac_decode_he_radiotap(skb, status, rxv, mode);
 
-       hdr = mt76_skb_get_hdr(skb);
-       if (!status->wcid || !ieee80211_is_data_qos(hdr->frame_control))
+       if (!status->wcid || !ieee80211_is_data_qos(fc))
                return 0;
 
-       status->aggr = unicast &&
-                      !ieee80211_is_qos_nullfunc(hdr->frame_control);
-       status->qos_ctl = *ieee80211_get_qos_ctl(hdr);
-       status->seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
+       status->aggr = unicast && !ieee80211_is_qos_nullfunc(fc);
+       status->seqno = IEEE80211_SEQ_TO_SN(seq_ctrl);
+       status->qos_ctl = qos_ctl;
 
        return 0;
 }
@@ -676,6 +732,23 @@ mt7921_mac_write_txwi_80211(struct mt7921_dev *dev, __le32 *txwi,
        txwi[7] |= cpu_to_le32(val);
 }
 
+static void mt7921_update_txs(struct mt76_wcid *wcid, __le32 *txwi)
+{
+       struct mt7921_sta *msta = container_of(wcid, struct mt7921_sta, wcid);
+       u32 pid, frame_type = FIELD_GET(MT_TXD2_FRAME_TYPE, le32_to_cpu(txwi[2]));
+
+       if (!(frame_type & (IEEE80211_FTYPE_DATA >> 2)))
+               return;
+
+       if (time_is_after_eq_jiffies(msta->next_txs_ts))
+               return;
+
+       msta->next_txs_ts = jiffies + msecs_to_jiffies(250);
+       pid = mt76_get_next_pkt_id(wcid);
+       txwi[5] |= cpu_to_le32(MT_TXD5_TX_STATUS_MCU |
+                              FIELD_PREP(MT_TXD5_PID, pid));
+}
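
mt7921_update_txs() above asks the MCU for a tx-status report at most once per 250 ms per station: it stamps next_txs_ts and tags the TXWI with a packet ID that the status event echoes back. The back-off in isolation (standalone sketch, not the kernel's jiffies helpers):

#include <stdbool.h>

struct txs_limiter {
        unsigned long next_ts;  /* earliest allowed next sample */
};

/* wrap-safe in the same sense as time_is_after_eq_jiffies() */
static bool txs_should_sample(struct txs_limiter *l, unsigned long now,
                              unsigned long window)
{
        if ((long)(now - l->next_ts) < 0)
                return false;   /* still inside the back-off window */

        l->next_ts = now + window;
        return true;
}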
+
 void mt7921_mac_write_txwi(struct mt7921_dev *dev, __le32 *txwi,
                           struct sk_buff *skb, struct mt76_wcid *wcid,
                           struct ieee80211_key_conf *key, bool beacon)
@@ -752,6 +825,8 @@ void mt7921_mac_write_txwi(struct mt7921_dev *dev, __le32 *txwi,
                txwi[6] |= cpu_to_le32(val);
                txwi[3] |= cpu_to_le32(MT_TXD3_BA_DISABLE);
        }
+
+       mt7921_update_txs(wcid, txwi);
 }
 
 static void
@@ -1154,18 +1229,18 @@ mt7921_phy_update_channel(struct mt76_phy *mphy, int idx)
        state->noise = -(phy->noise >> 4);
 }
 
-void mt7921_update_channel(struct mt76_dev *mdev)
+void mt7921_update_channel(struct mt76_phy *mphy)
 {
-       struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
+       struct mt7921_dev *dev = container_of(mphy->dev, struct mt7921_dev, mt76);
 
-       if (mt76_connac_pm_wake(&dev->mphy, &dev->pm))
+       if (mt76_connac_pm_wake(mphy, &dev->pm))
                return;
 
-       mt7921_phy_update_channel(&mdev->phy, 0);
+       mt7921_phy_update_channel(mphy, 0);
        /* reset obss airtime */
        mt76_set(dev, MT_WF_RMAC_MIB_TIME0(0), MT_WF_RMAC_MIB_RXTIME_CLR);
 
-       mt76_connac_power_save_sched(&dev->mphy, &dev->pm);
+       mt76_connac_power_save_sched(mphy, &dev->pm);
 }
 
 void mt7921_tx_token_put(struct mt7921_dev *dev)
@@ -1196,7 +1271,8 @@ mt7921_vif_connect_iter(void *priv, u8 *mac,
        struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
        struct mt7921_dev *dev = mvif->phy->dev;
 
-       ieee80211_disconnect(vif, true);
+       if (vif->type == NL80211_IFTYPE_STATION)
+               ieee80211_disconnect(vif, true);
 
        mt76_connac_mcu_uni_add_dev(&dev->mphy, vif, &mvif->sta.wcid, true);
        mt7921_mcu_set_tx(dev, vif);
@@ -1212,6 +1288,7 @@ mt7921_mac_reset(struct mt7921_dev *dev)
        mt76_wr(dev, MT_WFDMA0_HOST_INT_ENA, 0);
        mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0x0);
 
+       set_bit(MT76_RESET, &dev->mphy.state);
        set_bit(MT76_MCU_RESET, &dev->mphy.state);
        wake_up(&dev->mt76.mcu.wait);
        skb_queue_purge(&dev->mt76.mcu.res_q);
@@ -1227,56 +1304,64 @@ mt7921_mac_reset(struct mt7921_dev *dev)
        mt7921_tx_token_put(dev);
        idr_init(&dev->mt76.token);
 
-       err = mt7921_wpdma_reset(dev, true);
-       if (err)
-               return err;
+       mt7921_wpdma_reset(dev, true);
 
        mt76_for_each_q_rx(&dev->mt76, i) {
                napi_enable(&dev->mt76.napi[i]);
                napi_schedule(&dev->mt76.napi[i]);
        }
 
-       napi_enable(&dev->mt76.tx_napi);
-       napi_schedule(&dev->mt76.tx_napi);
-       mt76_worker_enable(&dev->mt76.tx_worker);
-
        clear_bit(MT76_MCU_RESET, &dev->mphy.state);
-       clear_bit(MT76_STATE_PM, &dev->mphy.state);
 
-       mt76_wr(dev, MT_WFDMA0_HOST_INT_ENA, 0);
+       mt76_wr(dev, MT_WFDMA0_HOST_INT_ENA,
+               MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL |
+               MT_INT_MCU_CMD);
        mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);
 
        err = mt7921_run_firmware(dev);
        if (err)
-               return err;
+               goto out;
 
        err = mt7921_mcu_set_eeprom(dev);
        if (err)
-               return err;
+               goto out;
 
-       mt7921_mac_init(dev);
-       return __mt7921_start(&dev->phy);
+       err = mt7921_mac_init(dev);
+       if (err)
+               goto out;
+
+       err = __mt7921_start(&dev->phy);
+out:
+       clear_bit(MT76_RESET, &dev->mphy.state);
+
+       napi_enable(&dev->mt76.tx_napi);
+       napi_schedule(&dev->mt76.tx_napi);
+       mt76_worker_enable(&dev->mt76.tx_worker);
+
+       return err;
 }
 
 /* system error recovery */
 void mt7921_mac_reset_work(struct work_struct *work)
 {
-       struct ieee80211_hw *hw;
-       struct mt7921_dev *dev;
+       struct mt7921_dev *dev = container_of(work, struct mt7921_dev,
+                                             reset_work);
+       struct ieee80211_hw *hw = mt76_hw(dev);
+       struct mt76_connac_pm *pm = &dev->pm;
        int i;
 
-       dev = container_of(work, struct mt7921_dev, reset_work);
-       hw = mt76_hw(dev);
-
        dev_err(dev->mt76.dev, "chip reset\n");
+       dev->hw_full_reset = true;
        ieee80211_stop_queues(hw);
 
        cancel_delayed_work_sync(&dev->mphy.mac_work);
-       cancel_delayed_work_sync(&dev->pm.ps_work);
-       cancel_work_sync(&dev->pm.wake_work);
+       cancel_delayed_work_sync(&pm->ps_work);
+       cancel_work_sync(&pm->wake_work);
 
        mutex_lock(&dev->mt76.mutex);
        for (i = 0; i < 10; i++) {
+               __mt7921_mcu_drv_pmctrl(dev);
+
                if (!mt7921_mac_reset(dev))
                        break;
        }
@@ -1293,16 +1378,24 @@ void mt7921_mac_reset_work(struct work_struct *work)
                ieee80211_scan_completed(dev->mphy.hw, &info);
        }
 
+       dev->hw_full_reset = false;
        ieee80211_wake_queues(hw);
        ieee80211_iterate_active_interfaces(hw,
                                            IEEE80211_IFACE_ITER_RESUME_ALL,
                                            mt7921_vif_connect_iter, NULL);
+       mt76_connac_power_save_sched(&dev->mt76.phy, pm);
 }
 
 void mt7921_reset(struct mt76_dev *mdev)
 {
        struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
 
+       if (!dev->hw_init_done)
+               return;
+
+       if (dev->hw_full_reset)
+               return;
+
        queue_work(dev->mt76.wq, &dev->reset_work);
 }
 
@@ -1337,30 +1430,6 @@ mt7921_mac_update_mib_stats(struct mt7921_phy *phy)
        }
 }
 
-static void
-mt7921_mac_sta_stats_work(struct mt7921_phy *phy)
-{
-       struct mt7921_dev *dev = phy->dev;
-       struct mt7921_sta *msta;
-       LIST_HEAD(list);
-
-       spin_lock_bh(&dev->sta_poll_lock);
-       list_splice_init(&phy->stats_list, &list);
-
-       while (!list_empty(&list)) {
-               msta = list_first_entry(&list, struct mt7921_sta, stats_list);
-               list_del_init(&msta->stats_list);
-               spin_unlock_bh(&dev->sta_poll_lock);
-
-               /* query wtbl info to report tx rate for further devices */
-               mt7921_get_wtbl_info(dev, msta->wcid.idx);
-
-               spin_lock_bh(&dev->sta_poll_lock);
-       }
-
-       spin_unlock_bh(&dev->sta_poll_lock);
-}
-
 void mt7921_mac_work(struct work_struct *work)
 {
        struct mt7921_phy *phy;
@@ -1372,16 +1441,12 @@ void mt7921_mac_work(struct work_struct *work)
 
        mt7921_mutex_acquire(phy->dev);
 
-       mt76_update_survey(mphy->dev);
+       mt76_update_survey(mphy);
        if (++mphy->mac_work_count == 2) {
                mphy->mac_work_count = 0;
 
                mt7921_mac_update_mib_stats(phy);
        }
-       if (++phy->sta_work_count == 4) {
-               phy->sta_work_count = 0;
-               mt7921_mac_sta_stats_work(phy);
-       }
 
        mt7921_mutex_release(phy->dev);
        ieee80211_queue_delayed_work(phy->mt76->hw, &mphy->mac_work,
@@ -1417,13 +1482,15 @@ void mt7921_pm_power_save_work(struct work_struct *work)
 {
        struct mt7921_dev *dev;
        unsigned long delta;
+       struct mt76_phy *mphy;
 
        dev = (struct mt7921_dev *)container_of(work, struct mt7921_dev,
                                                pm.ps_work.work);
+       mphy = dev->phy.mt76;
 
        delta = dev->pm.idle_timeout;
-       if (test_bit(MT76_HW_SCANNING, &dev->mphy.state) ||
-           test_bit(MT76_HW_SCHED_SCANNING, &dev->mphy.state))
+       if (test_bit(MT76_HW_SCANNING, &mphy->state) ||
+           test_bit(MT76_HW_SCHED_SCANNING, &mphy->state))
                goto out;
 
        if (time_is_after_jiffies(dev->pm.last_activity + delta)) {
@@ -1431,8 +1498,10 @@ void mt7921_pm_power_save_work(struct work_struct *work)
                goto out;
        }
 
-       if (!mt7921_mcu_fw_pmctrl(dev))
+       if (!mt7921_mcu_fw_pmctrl(dev)) {
+               cancel_delayed_work_sync(&mphy->mac_work);
                return;
+       }
 out:
        queue_delayed_work(dev->mt76.wq, &dev->pm.ps_work, delta);
 }
@@ -1494,7 +1563,7 @@ void mt7921_coredump_work(struct work_struct *work)
                        break;
 
                skb_pull(skb, sizeof(struct mt7921_mcu_rxd));
-               if (data + skb->len - dump > MT76_CONNAC_COREDUMP_SZ) {
+               if (!dump || data + skb->len - dump > MT76_CONNAC_COREDUMP_SZ) {
                        dev_kfree_skb(skb);
                        continue;
                }
@@ -1504,7 +1573,10 @@ void mt7921_coredump_work(struct work_struct *work)
 
                dev_kfree_skb(skb);
        }
-       dev_coredumpv(dev->mt76.dev, dump, MT76_CONNAC_COREDUMP_SZ,
-                     GFP_KERNEL);
+
+       if (dump)
+               dev_coredumpv(dev->mt76.dev, dump, MT76_CONNAC_COREDUMP_SZ,
+                             GFP_KERNEL);
+
        mt7921_reset(&dev->mt76);
 }
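
Both fixes above close the same hole: "dump" is NULL when the coredump buffer could not be allocated, so every append and the final dev_coredumpv() call now check it first. The guarded-append shape as a standalone sketch:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* append only when the buffer exists and the chunk still fits */
static bool dump_append(uint8_t *buf, size_t *ofs, size_t cap,
                        const void *chunk, size_t len)
{
        if (!buf || *ofs + len > cap)
                return false;

        memcpy(buf + *ofs, chunk, len);
        *ofs += len;
        return true;
}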
index 109c884..3af67fa 100644
@@ -88,6 +88,9 @@ enum rx_pkt_type {
 
 /* RXD DW4 */
 #define MT_RXD4_NORMAL_PAYLOAD_FORMAT  GENMASK(1, 0)
+#define MT_RXD4_FIRST_AMSDU_FRAME      GENMASK(1, 0)
+#define MT_RXD4_MID_AMSDU_FRAME        BIT(1)
+#define MT_RXD4_LAST_AMSDU_FRAME       BIT(0)
 #define MT_RXD4_NORMAL_PATTERN_DROP    BIT(9)
 #define MT_RXD4_NORMAL_CLS             BIT(10)
 #define MT_RXD4_NORMAL_OFLD            GENMASK(12, 11)
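
The three new A-MSDU defines alias the same two-bit MT_RXD4_NORMAL_PAYLOAD_FORMAT field: 0 means not an A-MSDU subframe, 0b11 (GENMASK(1, 0)) the first subframe, 0b10 (BIT(1)) a middle one and 0b01 (BIT(0)) the last. A small decoder makes the encoding explicit:

#include <stdint.h>

enum amsdu_pos { NOT_AMSDU, AMSDU_FIRST, AMSDU_MID, AMSDU_LAST };

static enum amsdu_pos amsdu_position(uint32_t rxd4)
{
        switch (rxd4 & 0x3) {   /* MT_RXD4_NORMAL_PAYLOAD_FORMAT */
        case 3:
                return AMSDU_FIRST;
        case 2:
                return AMSDU_MID;
        case 1:
                return AMSDU_LAST;
        default:
                return NOT_AMSDU;
        }
}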
@@ -97,6 +100,17 @@ enum rx_pkt_type {
 #define MT_RXD3_NORMAL_PF_MODE         BIT(29)
 #define MT_RXD3_NORMAL_PF_STS          GENMASK(31, 30)
 
+/* RXD GROUP4 */
+#define MT_RXD6_FRAME_CONTROL          GENMASK(15, 0)
+#define MT_RXD6_TA_LO                  GENMASK(31, 16)
+
+#define MT_RXD7_TA_HI                  GENMASK(31, 0)
+
+#define MT_RXD8_SEQ_CTRL               GENMASK(15, 0)
+#define MT_RXD8_QOS_CTL                GENMASK(31, 16)
+
+#define MT_RXD9_HT_CONTROL             GENMASK(31, 0)
+
 /* P-RXV DW0 */
 #define MT_PRXV_TX_RATE                        GENMASK(6, 0)
 #define MT_PRXV_TX_DCM                 BIT(4)
index 97a0ef3..7fd2104 100644
@@ -79,13 +79,14 @@ mt7921_init_he_caps(struct mt7921_phy *phy, enum nl80211_band band,
                he_cap_elem->phy_cap_info[1] =
                        IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD;
                he_cap_elem->phy_cap_info[2] =
+                       IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US |
                        IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ |
-                       IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ;
+                       IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ |
+                       IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO |
+                       IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO;
 
                switch (i) {
                case NL80211_IFTYPE_STATION:
-                       he_cap_elem->mac_cap_info[0] |=
-                               IEEE80211_HE_MAC_CAP0_TWT_REQ;
                        he_cap_elem->mac_cap_info[1] |=
                                IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US;
 
@@ -102,7 +103,15 @@ mt7921_init_he_caps(struct mt7921_phy *phy, enum nl80211_band band,
                        he_cap_elem->phy_cap_info[3] |=
                                IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_QPSK |
                                IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_QPSK;
+                       he_cap_elem->phy_cap_info[4] |=
+                               IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE |
+                               IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_4;
+                       he_cap_elem->phy_cap_info[5] |=
+                               IEEE80211_HE_PHY_CAP5_NG16_SU_FEEDBACK |
+                               IEEE80211_HE_PHY_CAP5_NG16_MU_FEEDBACK;
                        he_cap_elem->phy_cap_info[6] |=
+                               IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_42_SU |
+                               IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_75_MU |
                                IEEE80211_HE_PHY_CAP6_TRIG_CQI_FB |
                                IEEE80211_HE_PHY_CAP6_PARTIAL_BW_EXT_RANGE |
                                IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT;
@@ -223,54 +232,6 @@ static void mt7921_stop(struct ieee80211_hw *hw)
        mt7921_mutex_release(dev);
 }
 
-static inline int get_free_idx(u32 mask, u8 start, u8 end)
-{
-       return ffs(~mask & GENMASK(end, start));
-}
-
-static int get_omac_idx(enum nl80211_iftype type, u64 mask)
-{
-       int i;
-
-       switch (type) {
-       case NL80211_IFTYPE_STATION:
-               /* prefer hw bssid slot 1-3 */
-               i = get_free_idx(mask, HW_BSSID_1, HW_BSSID_3);
-               if (i)
-                       return i - 1;
-
-               /* next, try to find a free repeater entry for the sta */
-               i = get_free_idx(mask >> REPEATER_BSSID_START, 0,
-                                REPEATER_BSSID_MAX - REPEATER_BSSID_START);
-               if (i)
-                       return i + 32 - 1;
-
-               i = get_free_idx(mask, EXT_BSSID_1, EXT_BSSID_MAX);
-               if (i)
-                       return i - 1;
-
-               if (~mask & BIT(HW_BSSID_0))
-                       return HW_BSSID_0;
-
-               break;
-       case NL80211_IFTYPE_MONITOR:
-               /* ap uses hw bssid 0 and ext bssid */
-               if (~mask & BIT(HW_BSSID_0))
-                       return HW_BSSID_0;
-
-               i = get_free_idx(mask, EXT_BSSID_1, EXT_BSSID_MAX);
-               if (i)
-                       return i - 1;
-
-               break;
-       default:
-               WARN_ON(1);
-               break;
-       }
-
-       return -1;
-}
-
 static int mt7921_add_interface(struct ieee80211_hw *hw,
                                struct ieee80211_vif *vif)
 {
@@ -292,12 +253,7 @@ static int mt7921_add_interface(struct ieee80211_hw *hw,
                goto out;
        }
 
-       idx = get_omac_idx(vif->type, phy->omac_mask);
-       if (idx < 0) {
-               ret = -ENOSPC;
-               goto out;
-       }
-       mvif->mt76.omac_idx = idx;
+       mvif->mt76.omac_idx = mvif->mt76.idx;
        mvif->phy = phy;
        mvif->mt76.band_idx = 0;
        mvif->mt76.wmm_idx = mvif->mt76.idx % MT7921_MAX_WMM_SETS;
@@ -369,7 +325,7 @@ static void mt7921_remove_interface(struct ieee80211_hw *hw,
        spin_unlock_bh(&dev->sta_poll_lock);
 }
 
-int mt7921_set_channel(struct mt7921_phy *phy)
+static int mt7921_set_channel(struct mt7921_phy *phy)
 {
        struct mt7921_dev *dev = phy->dev;
        int ret;
@@ -429,6 +385,11 @@ static int mt7921_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
                key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIE;
                wcid_keyidx = &wcid->hw_key_idx2;
                break;
+       case WLAN_CIPHER_SUITE_WEP40:
+       case WLAN_CIPHER_SUITE_WEP104:
+               if (!mvif->wep_sta)
+                       return -EOPNOTSUPP;
+               fallthrough;
        case WLAN_CIPHER_SUITE_TKIP:
        case WLAN_CIPHER_SUITE_CCMP:
        case WLAN_CIPHER_SUITE_CCMP_256:
@@ -436,8 +396,6 @@ static int mt7921_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
        case WLAN_CIPHER_SUITE_GCMP_256:
        case WLAN_CIPHER_SUITE_SMS4:
                break;
-       case WLAN_CIPHER_SUITE_WEP40:
-       case WLAN_CIPHER_SUITE_WEP104:
        default:
                return -EOPNOTSUPP;
        }
@@ -455,6 +413,12 @@ static int mt7921_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
                            cmd == SET_KEY ? key : NULL);
 
        err = mt7921_mcu_add_key(dev, vif, msta, key, cmd);
+       if (err)
+               goto out;
+
+       if (key->cipher == WLAN_CIPHER_SUITE_WEP104 ||
+           key->cipher == WLAN_CIPHER_SUITE_WEP40)
+               err = mt7921_mcu_add_key(dev, vif, mvif->wep_sta, key, cmd);
 out:
        mt7921_mutex_release(dev);
 
@@ -477,6 +441,9 @@ static int mt7921_config(struct ieee80211_hw *hw, u32 changed)
 
        mt7921_mutex_acquire(dev);
 
+       if (changed & IEEE80211_CONF_CHANGE_POWER)
+               mt76_connac_mcu_set_rate_txpower(phy->mt76);
+
        if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
                bool enabled = !!(hw->conf.flags & IEEE80211_CONF_MONITOR);
 
@@ -622,7 +589,8 @@ static void mt7921_bss_info_changed(struct ieee80211_hw *hw,
                mt7921_mcu_uni_bss_ps(dev, vif);
 
        if (changed & BSS_CHANGED_ASSOC) {
-               mt7921_mcu_sta_add(dev, NULL, vif, true);
+               mt7921_mcu_sta_update(dev, NULL, vif, true,
+                                     MT76_STA_INFO_STATE_ASSOC);
                mt7921_bss_bcnft_apply(dev, vif, info->assoc);
        }
 
@@ -661,14 +629,14 @@ int mt7921_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
        if (ret)
                return ret;
 
-       if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls)
-               mt76_connac_mcu_uni_add_bss(&dev->mphy, vif, &mvif->sta.wcid,
-                                           true);
+       if (vif->type == NL80211_IFTYPE_STATION)
+               mvif->wep_sta = msta;
 
        mt7921_mac_wtbl_update(dev, idx,
                               MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
 
-       ret = mt7921_mcu_sta_add(dev, sta, vif, true);
+       ret = mt7921_mcu_sta_update(dev, sta, vif, true,
+                                   MT76_STA_INFO_STATE_NONE);
        if (ret)
                return ret;
 
@@ -677,6 +645,27 @@ int mt7921_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
        return 0;
 }
 
+void mt7921_mac_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+                         struct ieee80211_sta *sta)
+{
+       struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
+       struct mt7921_sta *msta = (struct mt7921_sta *)sta->drv_priv;
+       struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
+
+       mt7921_mutex_acquire(dev);
+
+       if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls)
+               mt76_connac_mcu_uni_add_bss(&dev->mphy, vif, &mvif->sta.wcid,
+                                           true);
+
+       mt7921_mac_wtbl_update(dev, msta->wcid.idx,
+                              MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
+
+       mt7921_mcu_sta_update(dev, sta, vif, true, MT76_STA_INFO_STATE_ASSOC);
+
+       mt7921_mutex_release(dev);
+}
+
 void mt7921_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
                           struct ieee80211_sta *sta)
 {
@@ -686,13 +675,14 @@ void mt7921_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
        mt76_connac_free_pending_tx_skbs(&dev->pm, &msta->wcid);
        mt76_connac_pm_wake(&dev->mphy, &dev->pm);
 
-       mt7921_mcu_sta_add(dev, sta, vif, false);
+       mt7921_mcu_sta_update(dev, sta, vif, false, MT76_STA_INFO_STATE_NONE);
        mt7921_mac_wtbl_update(dev, msta->wcid.idx,
                               MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
 
        if (vif->type == NL80211_IFTYPE_STATION) {
                struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
 
+               mvif->wep_sta = NULL;
                ewma_rssi_init(&mvif->rssi);
                if (!sta->tdls)
                        mt76_connac_mcu_uni_add_bss(&dev->mphy, vif,
@@ -720,7 +710,7 @@ void mt7921_tx_worker(struct mt76_worker *w)
        }
 
        mt76_txq_schedule_all(&dev->mphy);
-       mt76_connac_pm_unref(&dev->pm);
+       mt76_connac_pm_unref(&dev->mphy, &dev->pm);
 }
 
 static void mt7921_tx(struct ieee80211_hw *hw,
@@ -750,7 +740,7 @@ static void mt7921_tx(struct ieee80211_hw *hw,
 
        if (mt76_connac_pm_ref(mphy, &dev->pm)) {
                mt76_tx(mphy, control->sta, wcid, skb);
-               mt76_connac_pm_unref(&dev->pm);
+               mt76_connac_pm_unref(mphy, &dev->pm);
                return;
        }
 
@@ -831,20 +821,21 @@ mt7921_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
        return ret;
 }
 
-static int
-mt7921_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
-              struct ieee80211_sta *sta)
+static int mt7921_sta_state(struct ieee80211_hw *hw,
+                           struct ieee80211_vif *vif,
+                           struct ieee80211_sta *sta,
+                           enum ieee80211_sta_state old_state,
+                           enum ieee80211_sta_state new_state)
 {
-       return mt76_sta_state(hw, vif, sta, IEEE80211_STA_NOTEXIST,
-                             IEEE80211_STA_NONE);
-}
+       struct mt7921_dev *dev = mt7921_hw_dev(hw);
 
-static int
-mt7921_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
-                 struct ieee80211_sta *sta)
-{
-       return mt76_sta_state(hw, vif, sta, IEEE80211_STA_NONE,
-                             IEEE80211_STA_NOTEXIST);
+       if (dev->pm.ds_enable) {
+               mt7921_mutex_acquire(dev);
+               mt76_connac_sta_state_dp(&dev->mt76, old_state, new_state);
+               mt7921_mutex_release(dev);
+       }
+
+       return mt76_sta_state(hw, vif, sta, old_state, new_state);
 }
 
 static int
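
Replacing .sta_add/.sta_remove with .sta_state hands the driver every one-step transition of mac80211's station state machine, NOTEXIST <-> NONE <-> AUTH <-> ASSOC <-> AUTHORIZED, instead of only the two end transitions; mt7921 uses that to feed its deep-sleep bookkeeping before falling back to mt76_sta_state(). A hedged sketch of dispatching on the edges:

#include <net/mac80211.h>

static int example_sta_state(struct ieee80211_hw *hw,
                             struct ieee80211_vif *vif,
                             struct ieee80211_sta *sta,
                             enum ieee80211_sta_state old_state,
                             enum ieee80211_sta_state new_state)
{
        if (old_state == IEEE80211_STA_NOTEXIST &&
            new_state == IEEE80211_STA_NONE)
                return 0;       /* allocate per-sta driver state here */

        if (old_state == IEEE80211_STA_AUTH &&
            new_state == IEEE80211_STA_ASSOC)
                return 0;       /* push association parameters to firmware */

        if (old_state == IEEE80211_STA_NONE &&
            new_state == IEEE80211_STA_NOTEXIST)
                return 0;       /* tear down per-sta driver state here */

        return 0;
}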
@@ -1163,6 +1154,23 @@ static void mt7921_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
                           HZ / 2);
 }
 
+static void mt7921_sta_set_decap_offload(struct ieee80211_hw *hw,
+                                        struct ieee80211_vif *vif,
+                                        struct ieee80211_sta *sta,
+                                        bool enabled)
+{
+       struct mt7921_sta *msta = (struct mt7921_sta *)sta->drv_priv;
+       struct mt7921_dev *dev = mt7921_hw_dev(hw);
+
+       if (enabled)
+               set_bit(MT_WCID_FLAG_HDR_TRANS, &msta->wcid.flags);
+       else
+               clear_bit(MT_WCID_FLAG_HDR_TRANS, &msta->wcid.flags);
+
+       mt76_connac_mcu_sta_update_hdr_trans(&dev->mt76, vif, &msta->wcid,
+                                            MCU_UNI_CMD_STA_REC_UPDATE);
+}
+
 const struct ieee80211_ops mt7921_ops = {
        .tx = mt7921_tx,
        .start = mt7921_start,
@@ -1173,10 +1181,10 @@ const struct ieee80211_ops mt7921_ops = {
        .conf_tx = mt7921_conf_tx,
        .configure_filter = mt7921_configure_filter,
        .bss_info_changed = mt7921_bss_info_changed,
-       .sta_add = mt7921_sta_add,
-       .sta_remove = mt7921_sta_remove,
+       .sta_state = mt7921_sta_state,
        .sta_pre_rcu_remove = mt76_sta_pre_rcu_remove,
        .set_key = mt7921_set_key,
+       .sta_set_decap_offload = mt7921_sta_set_decap_offload,
        .ampdu_action = mt7921_ampdu_action,
        .set_rts_threshold = mt7921_set_rts_threshold,
        .wake_tx_queue = mt76_wake_tx_queue,
index 67dc4b4..c2c4dc1 100644
@@ -88,28 +88,28 @@ struct mt7921_fw_region {
 #define to_wcid_lo(id)                 FIELD_GET(GENMASK(7, 0), (u16)id)
 #define to_wcid_hi(id)                 FIELD_GET(GENMASK(9, 8), (u16)id)
 
-static enum mt7921_cipher_type
+static enum mcu_cipher_type
 mt7921_mcu_get_cipher(int cipher)
 {
        switch (cipher) {
        case WLAN_CIPHER_SUITE_WEP40:
-               return MT_CIPHER_WEP40;
+               return MCU_CIPHER_WEP40;
        case WLAN_CIPHER_SUITE_WEP104:
-               return MT_CIPHER_WEP104;
+               return MCU_CIPHER_WEP104;
        case WLAN_CIPHER_SUITE_TKIP:
-               return MT_CIPHER_TKIP;
+               return MCU_CIPHER_TKIP;
        case WLAN_CIPHER_SUITE_AES_CMAC:
-               return MT_CIPHER_BIP_CMAC_128;
+               return MCU_CIPHER_BIP_CMAC_128;
        case WLAN_CIPHER_SUITE_CCMP:
-               return MT_CIPHER_AES_CCMP;
+               return MCU_CIPHER_AES_CCMP;
        case WLAN_CIPHER_SUITE_CCMP_256:
-               return MT_CIPHER_CCMP_256;
+               return MCU_CIPHER_CCMP_256;
        case WLAN_CIPHER_SUITE_GCMP:
-               return MT_CIPHER_GCMP;
+               return MCU_CIPHER_GCMP;
        case WLAN_CIPHER_SUITE_GCMP_256:
-               return MT_CIPHER_GCMP_256;
+               return MCU_CIPHER_GCMP_256;
        case WLAN_CIPHER_SUITE_SMS4:
-               return MT_CIPHER_WAPI;
+               return MCU_CIPHER_WAPI;
        default:
                return MT_CIPHER_NONE;
        }
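
The rename separates two numbering schemes that previously shared the MT_CIPHER_* names: the IDs the firmware interface expects (MCU_CIPHER_*, starting at 1) and the hardware's own cipher IDs. The default branch can keep returning MT_CIPHER_NONE because 0 is not a valid MCU cipher and so doubles as the "none" sentinel. A toy sketch of why the distinct enums are worth the churn:

/* handing a hardware cipher ID to a firmware command now looks wrong in
 * review (and to static checkers) instead of silently reinterpreting
 * the integer */
enum hw_cipher { HW_CIPHER_NONE, HW_CIPHER_WEP40, HW_CIPHER_TKIP };
enum fw_cipher { FW_CIPHER_WEP40 = 1, FW_CIPHER_TKIP = 4 };

static int fw_set_key(enum fw_cipher cipher)
{
        return cipher ? 0 : -1; /* 0 still means "no cipher" */
}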
@@ -398,43 +398,6 @@ mt7921_mcu_tx_rate_parse(struct mt76_phy *mphy,
        }
 }
 
-static void
-mt7921_mcu_tx_rate_report(struct mt7921_dev *dev, struct sk_buff *skb,
-                         u16 wlan_idx)
-{
-       struct mt7921_mcu_wlan_info_event *wtbl_info;
-       struct mt76_phy *mphy = &dev->mphy;
-       struct mt7921_sta_stats *stats;
-       struct rate_info rate = {};
-       struct mt7921_sta *msta;
-       struct mt76_wcid *wcid;
-       u8 idx;
-
-       if (wlan_idx >= MT76_N_WCIDS)
-               return;
-
-       wtbl_info = (struct mt7921_mcu_wlan_info_event *)skb->data;
-       idx = wtbl_info->rate_info.rate_idx;
-       if (idx >= ARRAY_SIZE(wtbl_info->rate_info.rate))
-               return;
-
-       rcu_read_lock();
-
-       wcid = rcu_dereference(dev->mt76.wcid[wlan_idx]);
-       if (!wcid)
-               goto out;
-
-       msta = container_of(wcid, struct mt7921_sta, wcid);
-       stats = &msta->stats;
-
-       /* current rate */
-       mt7921_mcu_tx_rate_parse(mphy, &wtbl_info->peer_cap, &rate,
-                                le16_to_cpu(wtbl_info->rate_info.rate[idx]));
-       stats->tx_rate = rate;
-out:
-       rcu_read_unlock();
-}
-
 static void
 mt7921_mcu_scan_event(struct mt7921_dev *dev, struct sk_buff *skb)
 {
@@ -450,22 +413,33 @@ mt7921_mcu_scan_event(struct mt7921_dev *dev, struct sk_buff *skb)
 }
 
 static void
-mt7921_mcu_beacon_loss_event(struct mt7921_dev *dev, struct sk_buff *skb)
+mt7921_mcu_connection_loss_iter(void *priv, u8 *mac,
+                               struct ieee80211_vif *vif)
+{
+       struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+       struct mt76_connac_beacon_loss_event *event = priv;
+
+       if (mvif->idx != event->bss_idx)
+               return;
+
+       if (!(vif->driver_flags & IEEE80211_VIF_BEACON_FILTER))
+               return;
+
+       ieee80211_connection_loss(vif);
+}
+
+static void
+mt7921_mcu_connection_loss_event(struct mt7921_dev *dev, struct sk_buff *skb)
 {
        struct mt76_connac_beacon_loss_event *event;
-       struct mt76_phy *mphy;
-       u8 band_idx = 0; /* DBDC support */
+       struct mt76_phy *mphy = &dev->mt76.phy;
 
        skb_pull(skb, sizeof(struct mt7921_mcu_rxd));
        event = (struct mt76_connac_beacon_loss_event *)skb->data;
-       if (band_idx && dev->mt76.phy2)
-               mphy = dev->mt76.phy2;
-       else
-               mphy = &dev->mt76.phy;
 
        ieee80211_iterate_active_interfaces_atomic(mphy->hw,
                                        IEEE80211_IFACE_ITER_RESUME_ALL,
-                                       mt76_connac_mcu_beacon_loss_iter, event);
+                                       mt7921_mcu_connection_loss_iter, event);
 }
 
 static void
@@ -523,6 +497,49 @@ mt7921_mcu_low_power_event(struct mt7921_dev *dev, struct sk_buff *skb)
        trace_lp_event(dev, event->state);
 }
 
+static void
+mt7921_mcu_tx_done_event(struct mt7921_dev *dev, struct sk_buff *skb)
+{
+       struct mt7921_mcu_tx_done_event *event;
+       struct mt7921_sta *msta;
+       struct mt7921_phy *mphy = &dev->phy;
+       struct mt7921_mcu_peer_cap peer;
+       struct ieee80211_sta *sta;
+       LIST_HEAD(list);
+
+       skb_pull(skb, sizeof(struct mt7921_mcu_rxd));
+       event = (struct mt7921_mcu_tx_done_event *)skb->data;
+
+       spin_lock_bh(&dev->sta_poll_lock);
+       list_splice_init(&mphy->stats_list, &list);
+
+       while (!list_empty(&list)) {
+               msta = list_first_entry(&list, struct mt7921_sta, stats_list);
+               list_del_init(&msta->stats_list);
+
+               if (msta->wcid.idx != event->wlan_idx)
+                       continue;
+
+               spin_unlock_bh(&dev->sta_poll_lock);
+
+               sta = wcid_to_sta(&msta->wcid);
+
+               /* peer config based on IEEE SPEC */
+               memset(&peer, 0x0, sizeof(peer));
+               peer.bw = event->bw;
+               peer.g2 = !!(sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20);
+               peer.g4 = !!(sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40);
+               peer.g8 = !!(sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80);
+               peer.g16 = !!(sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_160);
+               mt7921_mcu_tx_rate_parse(mphy->mt76, &peer,
+                                        &msta->stats.tx_rate, le16_to_cpu(event->tx_rate));
+
+               spin_lock_bh(&dev->sta_poll_lock);
+               break;
+       }
+       spin_unlock_bh(&dev->sta_poll_lock);
+}
+
 static void
 mt7921_mcu_rx_unsolicited_event(struct mt7921_dev *dev, struct sk_buff *skb)
 {
@@ -530,7 +547,7 @@ mt7921_mcu_rx_unsolicited_event(struct mt7921_dev *dev, struct sk_buff *skb)
 
        switch (rxd->eid) {
        case MCU_EVENT_BSS_BEACON_LOSS:
-               mt7921_mcu_beacon_loss_event(dev, skb);
+               mt7921_mcu_connection_loss_event(dev, skb);
                break;
        case MCU_EVENT_SCHED_SCAN_DONE:
        case MCU_EVENT_SCAN_DONE:
@@ -549,6 +566,9 @@ mt7921_mcu_rx_unsolicited_event(struct mt7921_dev *dev, struct sk_buff *skb)
        case MCU_EVENT_LP_INFO:
                mt7921_mcu_low_power_event(dev, skb);
                break;
+       case MCU_EVENT_TX_DONE:
+               mt7921_mcu_tx_done_event(dev, skb);
+               break;
        default:
                break;
        }
@@ -569,6 +589,7 @@ void mt7921_mcu_rx_event(struct mt7921_dev *dev, struct sk_buff *skb)
            rxd->eid == MCU_EVENT_SCHED_SCAN_DONE ||
            rxd->eid == MCU_EVENT_BSS_ABSENCE ||
            rxd->eid == MCU_EVENT_SCAN_DONE ||
+           rxd->eid == MCU_EVENT_TX_DONE ||
            rxd->eid == MCU_EVENT_DBG_MSG ||
            rxd->eid == MCU_EVENT_COREDUMP ||
            rxd->eid == MCU_EVENT_LP_INFO ||
@@ -604,14 +625,14 @@ mt7921_mcu_sta_key_tlv(struct mt7921_sta *msta, struct sk_buff *skb,
                sec_key = &sec->key[0];
                sec_key->cipher_len = sizeof(*sec_key);
 
-               if (cipher == MT_CIPHER_BIP_CMAC_128) {
-                       sec_key->cipher_id = MT_CIPHER_AES_CCMP;
+               if (cipher == MCU_CIPHER_BIP_CMAC_128) {
+                       sec_key->cipher_id = MCU_CIPHER_AES_CCMP;
                        sec_key->key_id = bip->keyidx;
                        sec_key->key_len = 16;
                        memcpy(sec_key->key, bip->key, 16);
 
                        sec_key = &sec->key[1];
-                       sec_key->cipher_id = MT_CIPHER_BIP_CMAC_128;
+                       sec_key->cipher_id = MCU_CIPHER_BIP_CMAC_128;
                        sec_key->cipher_len = sizeof(*sec_key);
                        sec_key->key_len = 16;
                        memcpy(sec_key->key, key->key, 16);
@@ -623,14 +644,14 @@ mt7921_mcu_sta_key_tlv(struct mt7921_sta *msta, struct sk_buff *skb,
                        sec_key->key_len = key->keylen;
                        memcpy(sec_key->key, key->key, key->keylen);
 
-                       if (cipher == MT_CIPHER_TKIP) {
+                       if (cipher == MCU_CIPHER_TKIP) {
                                /* Rx/Tx MIC keys are swapped */
                                memcpy(sec_key->key + 16, key->key + 24, 8);
                                memcpy(sec_key->key + 24, key->key + 16, 8);
                        }
 
                        /* store key_conf for BIP batch update */
-                       if (cipher == MT_CIPHER_AES_CCMP) {
+                       if (cipher == MCU_CIPHER_AES_CCMP) {
                                memcpy(bip->key, key->key, key->keylen);
                                bip->keyidx = key->keyidx;
                        }
@@ -934,8 +955,6 @@ static int mt7921_load_firmware(struct mt7921_dev *dev)
        dev->mt76.hw->wiphy->wowlan = &mt76_connac_wowlan_support;
 #endif /* CONFIG_PM */
 
-       clear_bit(MT76_STATE_PM, &dev->mphy.state);
-
        dev_err(dev->mt76.dev, "Firmware init done\n");
 
        return 0;
@@ -969,7 +988,7 @@ int mt7921_run_firmware(struct mt7921_dev *dev)
        set_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);
        mt7921_mcu_fw_log_2_host(dev, 1);
 
-       return 0;
+       return mt76_connac_mcu_get_nic_capability(&dev->mphy);
 }
 
 int mt7921_mcu_init(struct mt7921_dev *dev)
@@ -1136,26 +1155,6 @@ int mt7921_mcu_get_eeprom(struct mt7921_dev *dev, u32 offset)
        return 0;
 }
 
-u32 mt7921_get_wtbl_info(struct mt7921_dev *dev, u32 wlan_idx)
-{
-       struct mt7921_mcu_wlan_info wtbl_info = {
-               .wlan_idx = cpu_to_le32(wlan_idx),
-       };
-       struct sk_buff *skb;
-       int ret;
-
-       ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_CMD_GET_WTBL,
-                                       &wtbl_info, sizeof(wtbl_info), true,
-                                       &skb);
-       if (ret)
-               return ret;
-
-       mt7921_mcu_tx_rate_report(dev, skb, wlan_idx);
-       dev_kfree_skb(skb);
-
-       return 0;
-}
-
 int mt7921_mcu_uni_bss_ps(struct mt7921_dev *dev, struct ieee80211_vif *vif)
 {
        struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
@@ -1268,8 +1267,9 @@ int mt7921_mcu_set_bss_pm(struct mt7921_dev *dev, struct ieee80211_vif *vif,
                                 sizeof(req), false);
 }
 
-int mt7921_mcu_sta_add(struct mt7921_dev *dev, struct ieee80211_sta *sta,
-                      struct ieee80211_vif *vif, bool enable)
+int mt7921_mcu_sta_update(struct mt7921_dev *dev, struct ieee80211_sta *sta,
+                         struct ieee80211_vif *vif, bool enable,
+                         enum mt76_sta_info_state state)
 {
        struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
        int rssi = -ewma_rssi_read(&mvif->rssi);
@@ -1278,27 +1278,25 @@ int mt7921_mcu_sta_add(struct mt7921_dev *dev, struct ieee80211_sta *sta,
                .vif = vif,
                .enable = enable,
                .cmd = MCU_UNI_CMD_STA_REC_UPDATE,
+               .state = state,
+               .offload_fw = true,
                .rcpi = to_rcpi(rssi),
        };
        struct mt7921_sta *msta;
 
        msta = sta ? (struct mt7921_sta *)sta->drv_priv : NULL;
        info.wcid = msta ? &msta->wcid : &mvif->sta.wcid;
+       info.newly = msta ? state != MT76_STA_INFO_STATE_ASSOC : true;
 
-       return mt76_connac_mcu_add_sta_cmd(&dev->mphy, &info);
+       return mt76_connac_mcu_sta_cmd(&dev->mphy, &info);
 }
 
-int mt7921_mcu_drv_pmctrl(struct mt7921_dev *dev)
+int __mt7921_mcu_drv_pmctrl(struct mt7921_dev *dev)
 {
        struct mt76_phy *mphy = &dev->mt76.phy;
        struct mt76_connac_pm *pm = &dev->pm;
        int i, err = 0;
 
-       mutex_lock(&pm->mutex);
-
-       if (!test_bit(MT76_STATE_PM, &mphy->state))
-               goto out;
-
        for (i = 0; i < MT7921_DRV_OWN_RETRY_COUNT; i++) {
                mt76_wr(dev, MT_CONN_ON_LPCTL, PCIE_LPCR_HOST_CLR_OWN);
                if (mt76_poll_msec(dev, MT_CONN_ON_LPCTL,
@@ -1318,6 +1316,22 @@ int mt7921_mcu_drv_pmctrl(struct mt7921_dev *dev)
        pm->stats.last_wake_event = jiffies;
        pm->stats.doze_time += pm->stats.last_wake_event -
                               pm->stats.last_doze_event;
+out:
+       return err;
+}
+
+int mt7921_mcu_drv_pmctrl(struct mt7921_dev *dev)
+{
+       struct mt76_phy *mphy = &dev->mt76.phy;
+       struct mt76_connac_pm *pm = &dev->pm;
+       int err = 0;
+
+       mutex_lock(&pm->mutex);
+
+       if (!test_bit(MT76_STATE_PM, &mphy->state))
+               goto out;
+
+       err = __mt7921_mcu_drv_pmctrl(dev);
 out:
        mutex_unlock(&pm->mutex);
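
Splitting the ownership handshake into __mt7921_mcu_drv_pmctrl() (the bare register poll) and mt7921_mcu_drv_pmctrl() (mutex plus MT76_STATE_PM check) follows the usual kernel __foo()/foo() convention: the reset path, which must force driver ownership regardless of the PM state bit, calls the bare variant directly. The shape in miniature (stand-in types, pthread mutex in place of the kernel's):

#include <pthread.h>

struct pm_ctl {
        pthread_mutex_t lock;   /* init with PTHREAD_MUTEX_INITIALIZER */
        int fw_owns;            /* stand-in for the MT76_STATE_PM bit */
};

/* bare helper: the caller decides about locking and state checks */
static int __drv_take_ownership(struct pm_ctl *pm)
{
        /* poll the ownership register here */
        pm->fw_owns = 0;
        return 0;
}

/* locked wrapper: the common path, a no-op if the host already owns */
static int drv_take_ownership(struct pm_ctl *pm)
{
        int err = 0;

        pthread_mutex_lock(&pm->lock);
        if (pm->fw_owns)
                err = __drv_take_ownership(pm);
        pthread_mutex_unlock(&pm->lock);

        return err;
}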
 
@@ -1368,6 +1382,7 @@ mt7921_pm_interface_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
 {
        struct mt7921_phy *phy = priv;
        struct mt7921_dev *dev = phy->dev;
+       struct ieee80211_hw *hw = mt76_hw(dev);
        int ret;
 
        if (dev->pm.enable)
@@ -1380,9 +1395,11 @@ mt7921_pm_interface_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
 
        if (dev->pm.enable) {
                vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER;
+               ieee80211_hw_set(hw, CONNECTION_MONITOR);
                mt76_set(dev, MT_WF_RFCR(0), MT_WF_RFCR_DROP_OTHER_BEACON);
        } else {
                vif->driver_flags &= ~IEEE80211_VIF_BEACON_FILTER;
+               __clear_bit(IEEE80211_HW_CONNECTION_MONITOR, hw->flags);
                mt76_clear(dev, MT_WF_RFCR(0), MT_WF_RFCR_DROP_OTHER_BEACON);
        }
 }
index 49823d0..d76cf8f 100644
@@ -81,6 +81,7 @@ enum {
        MCU_EVENT_REG_ACCESS = 0x05,
        MCU_EVENT_LP_INFO = 0x07,
        MCU_EVENT_SCAN_DONE = 0x0d,
+       MCU_EVENT_TX_DONE = 0x0f,
        MCU_EVENT_BSS_ABSENCE  = 0x11,
        MCU_EVENT_BSS_BEACON_LOSS = 0x13,
        MCU_EVENT_CH_PRIVILEGE = 0x18,
@@ -197,18 +198,17 @@ struct sta_rec_sec {
        struct sec_key key[2];
 } __packed;
 
-enum mt7921_cipher_type {
-       MT_CIPHER_NONE,
-       MT_CIPHER_WEP40,
-       MT_CIPHER_WEP104,
-       MT_CIPHER_WEP128,
-       MT_CIPHER_TKIP,
-       MT_CIPHER_AES_CCMP,
-       MT_CIPHER_CCMP_256,
-       MT_CIPHER_GCMP,
-       MT_CIPHER_GCMP_256,
-       MT_CIPHER_WAPI,
-       MT_CIPHER_BIP_CMAC_128,
+enum mcu_cipher_type {
+       MCU_CIPHER_WEP40 = 1,
+       MCU_CIPHER_WEP104,
+       MCU_CIPHER_WEP128,
+       MCU_CIPHER_TKIP,
+       MCU_CIPHER_AES_CCMP,
+       MCU_CIPHER_CCMP_256,
+       MCU_CIPHER_GCMP,
+       MCU_CIPHER_GCMP_256,
+       MCU_CIPHER_WAPI,
+       MCU_CIPHER_BIP_CMAC_128,
 };
 
 enum {
@@ -254,86 +254,6 @@ struct mt7921_mcu_reg_event {
        __le32 val;
 } __packed;
 
-struct mt7921_mcu_tx_config {
-       u8 peer_addr[ETH_ALEN];
-       u8 sw;
-       u8 dis_rx_hdr_tran;
-
-       u8 aad_om;
-       u8 pfmu_idx;
-       __le16 partial_aid;
-
-       u8 ibf;
-       u8 ebf;
-       u8 is_ht;
-       u8 is_vht;
-
-       u8 mesh;
-       u8 baf_en;
-       u8 cf_ack;
-       u8 rdg_ba;
-
-       u8 rdg;
-       u8 pm;
-       u8 rts;
-       u8 smps;
-
-       u8 txop_ps;
-       u8 not_update_ipsm;
-       u8 skip_tx;
-       u8 ldpc;
-
-       u8 qos;
-       u8 from_ds;
-       u8 to_ds;
-       u8 dyn_bw;
-
-       u8 amdsu_cross_lg;
-       u8 check_per;
-       u8 gid_63;
-       u8 he;
-
-       u8 vht_ibf;
-       u8 vht_ebf;
-       u8 vht_ldpc;
-       u8 he_ldpc;
-} __packed;
-
-struct mt7921_mcu_sec_config {
-       u8 wpi_flag;
-       u8 rv;
-       u8 ikv;
-       u8 rkv;
-
-       u8 rcid;
-       u8 rca1;
-       u8 rca2;
-       u8 even_pn;
-
-       u8 key_id;
-       u8 muar_idx;
-       u8 cipher_suit;
-       u8 rsv[1];
-} __packed;
-
-struct mt7921_mcu_key_config {
-       u8 key[32];
-} __packed;
-
-struct mt7921_mcu_rate_info {
-       u8 mpdu_fail;
-       u8 mpdu_tx;
-       u8 rate_idx;
-       u8 rsv[1];
-       __le16 rate[8];
-} __packed;
-
-struct mt7921_mcu_ba_config {
-       u8 ba_en;
-       u8 rsv[3];
-       __le32 ba_winsize;
-} __packed;
-
 struct mt7921_mcu_ant_id_config {
        u8 ant_id[4];
 } __packed;
@@ -357,41 +277,6 @@ struct mt7921_mcu_peer_cap {
        u8 rsv[1];
 } __packed;
 
-struct mt7921_mcu_rx_cnt {
-       u8 rx_rcpi[4];
-       u8 rx_cc[4];
-       u8 rx_cc_sel;
-       u8 ce_rmsd;
-       u8 rsv[2];
-} __packed;
-
-struct mt7921_mcu_tx_cnt {
-       __le16 rate1_cnt;
-       __le16 rate1_fail_cnt;
-       __le16 rate2_cnt;
-       __le16 rate3_cnt;
-       __le16 cur_bw_tx_cnt;
-       __le16 cur_bw_tx_fail_cnt;
-       __le16 other_bw_tx_cnt;
-       __le16 other_bw_tx_fail_cnt;
-} __packed;
-
-struct mt7921_mcu_wlan_info_event {
-       struct mt7921_mcu_tx_config tx_config;
-       struct mt7921_mcu_sec_config sec_config;
-       struct mt7921_mcu_key_config key_config;
-       struct mt7921_mcu_rate_info rate_info;
-       struct mt7921_mcu_ba_config ba_config;
-       struct mt7921_mcu_peer_cap peer_cap;
-       struct mt7921_mcu_rx_cnt rx_cnt;
-       struct mt7921_mcu_tx_cnt tx_cnt;
-} __packed;
-
-struct mt7921_mcu_wlan_info {
-       __le32 wlan_idx;
-       struct mt7921_mcu_wlan_info_event event;
-} __packed;
-
 struct mt7921_txpwr_req {
        u8 ver;
        u8 action;
@@ -407,4 +292,31 @@ struct mt7921_txpwr_event {
        struct mt7921_txpwr txpwr;
 } __packed;
 
+struct mt7921_mcu_tx_done_event {
+       u8 pid;
+       u8 status;
+       __le16 seq;
+
+       u8 wlan_idx;
+       u8 tx_cnt;
+       __le16 tx_rate;
+
+       u8 flag;
+       u8 tid;
+       u8 rsp_rate;
+       u8 mcs;
+
+       u8 bw;
+       u8 tx_pwr;
+       u8 reason;
+       u8 rsv0[1];
+
+       __le32 delay;
+       __le32 timestamp;
+       __le32 applied_flag;
+
+       u8 txs[28];
+
+       u8 rsv1[32];
+} __packed;
 #endif
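
Every multi-byte field the MCU fills in this __packed event arrives little-endian on the wire; the __le16/__le32 annotations document that and let sparse (make C=1) flag any direct read that forgets the conversion. Typical consumption inside the driver:

/* driver context: kernel types and byteorder helpers assumed */
static u32 tx_done_delay(const struct mt7921_mcu_tx_done_event *event)
{
        /* convert exactly once, at the wire boundary */
        return le32_to_cpu(event->delay);
}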
index 59862ea..2d8bd6b 100644
@@ -92,6 +92,8 @@ struct mt7921_sta {
        unsigned long ampdu_state;
 
        struct mt7921_sta_key_conf bip;
+
+       unsigned long next_txs_ts;
 };
 
 DECLARE_EWMA(rssi, 10, 8);
@@ -100,6 +102,8 @@ struct mt7921_vif {
        struct mt76_vif mt76; /* must be first */
 
        struct mt7921_sta sta;
+       struct mt7921_sta *wep_sta;
+
        struct mt7921_phy *phy;
 
        struct ewma_rssi rssi;
@@ -156,6 +160,8 @@ struct mt7921_dev {
        u16 chainmask;
 
        struct work_struct reset_work;
+       bool hw_full_reset:1;
+       bool hw_init_done:1;
 
        struct list_head sta_poll_list;
        spinlock_t sta_poll_lock;
@@ -256,9 +262,9 @@ int mt7921_mcu_init(struct mt7921_dev *dev);
 int mt7921_mcu_add_key(struct mt7921_dev *dev, struct ieee80211_vif *vif,
                       struct mt7921_sta *msta, struct ieee80211_key_conf *key,
                       enum set_key_cmd cmd);
-int mt7921_set_channel(struct mt7921_phy *phy);
-int mt7921_mcu_sta_add(struct mt7921_dev *dev, struct ieee80211_sta *sta,
-                      struct ieee80211_vif *vif, bool enable);
+int mt7921_mcu_sta_update(struct mt7921_dev *dev, struct ieee80211_sta *sta,
+                         struct ieee80211_vif *vif, bool enable,
+                         enum mt76_sta_info_state state);
 int mt7921_mcu_set_chan_info(struct mt7921_phy *phy, int cmd);
 int mt7921_mcu_set_tx(struct mt7921_dev *dev, struct ieee80211_vif *vif);
 int mt7921_mcu_set_eeprom(struct mt7921_dev *dev);
@@ -318,7 +324,7 @@ static inline bool mt7921_dma_need_reinit(struct mt7921_dev *dev)
        return !mt76_get_field(dev, MT_WFDMA_DUMMY_CR, MT_WFDMA_NEED_REINIT);
 }
 
-void mt7921_mac_init(struct mt7921_dev *dev);
+int mt7921_mac_init(struct mt7921_dev *dev);
 bool mt7921_mac_wtbl_update(struct mt7921_dev *dev, int idx, u32 mask);
 void mt7921_mac_reset_counters(struct mt7921_phy *phy);
 void mt7921_mac_write_txwi(struct mt7921_dev *dev, __le32 *txwi,
@@ -330,6 +336,8 @@ void mt7921_mac_fill_rx_vector(struct mt7921_dev *dev, struct sk_buff *skb);
 void mt7921_mac_tx_free(struct mt7921_dev *dev, struct sk_buff *skb);
 int mt7921_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
                       struct ieee80211_sta *sta);
+void mt7921_mac_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+                         struct ieee80211_sta *sta);
 void mt7921_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
                           struct ieee80211_sta *sta);
 void mt7921_mac_work(struct work_struct *work);
@@ -352,7 +360,7 @@ void mt7921_stats_work(struct work_struct *work);
 void mt7921_txp_skb_unmap(struct mt76_dev *dev,
                          struct mt76_txwi_cache *txwi);
 void mt7921_set_stream_he_caps(struct mt7921_phy *phy);
-void mt7921_update_channel(struct mt76_dev *mdev);
+void mt7921_update_channel(struct mt76_phy *mphy);
 int mt7921_init_debugfs(struct mt7921_dev *dev);
 
 int mt7921_mcu_uni_tx_ba(struct mt7921_dev *dev,
@@ -362,12 +370,12 @@ int mt7921_mcu_uni_rx_ba(struct mt7921_dev *dev,
                         struct ieee80211_ampdu_params *params,
                         bool enable);
 void mt7921_scan_work(struct work_struct *work);
-u32 mt7921_get_wtbl_info(struct mt7921_dev *dev, u32 wlan_idx);
 int mt7921_mcu_uni_bss_ps(struct mt7921_dev *dev, struct ieee80211_vif *vif);
 int mt7921_mcu_uni_bss_bcnft(struct mt7921_dev *dev, struct ieee80211_vif *vif,
                             bool enable);
 int mt7921_mcu_set_bss_pm(struct mt7921_dev *dev, struct ieee80211_vif *vif,
                          bool enable);
+int __mt7921_mcu_drv_pmctrl(struct mt7921_dev *dev);
 int mt7921_mcu_drv_pmctrl(struct mt7921_dev *dev);
 int mt7921_mcu_fw_pmctrl(struct mt7921_dev *dev);
 void mt7921_pm_wake_work(struct work_struct *work);
index fa02d93..c3905bc 100644
@@ -106,6 +106,7 @@ static int mt7921_pci_probe(struct pci_dev *pdev,
                .rx_poll_complete = mt7921_rx_poll_complete,
                .sta_ps = mt7921_sta_ps,
                .sta_add = mt7921_mac_sta_add,
+               .sta_assoc = mt7921_mac_sta_assoc,
                .sta_remove = mt7921_mac_sta_remove,
                .update_survey = mt7921_update_channel,
        };
@@ -188,22 +189,29 @@ static int mt7921_pci_suspend(struct pci_dev *pdev, pm_message_t state)
 {
        struct mt76_dev *mdev = pci_get_drvdata(pdev);
        struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
+       struct mt76_connac_pm *pm = &dev->pm;
        bool hif_suspend;
        int i, err;
 
-       err = mt76_connac_pm_wake(&dev->mphy, &dev->pm);
+       pm->suspended = true;
+       cancel_delayed_work_sync(&pm->ps_work);
+       cancel_work_sync(&pm->wake_work);
+
+       err = mt7921_mcu_drv_pmctrl(dev);
        if (err < 0)
-               return err;
+               goto restore_suspend;
 
        hif_suspend = !test_bit(MT76_STATE_SUSPEND, &dev->mphy.state);
        if (hif_suspend) {
                err = mt76_connac_mcu_set_hif_suspend(mdev, true);
                if (err)
-                       return err;
+                       goto restore_suspend;
        }
 
-       if (!dev->pm.enable)
-               mt76_connac_mcu_set_deep_sleep(&dev->mt76, true);
+       /* always enable deep sleep during suspend to reduce
+        * power consumption
+        */
+       mt76_connac_mcu_set_deep_sleep(&dev->mt76, true);
 
        napi_disable(&mdev->tx_napi);
        mt76_worker_disable(&mdev->tx_worker);
@@ -231,27 +239,30 @@ static int mt7921_pci_suspend(struct pci_dev *pdev, pm_message_t state)
 
        err = mt7921_mcu_fw_pmctrl(dev);
        if (err)
-               goto restore;
+               goto restore_napi;
 
        pci_save_state(pdev);
        err = pci_set_power_state(pdev, pci_choose_state(pdev, state));
        if (err)
-               goto restore;
+               goto restore_napi;
 
        return 0;
 
-restore:
+restore_napi:
        mt76_for_each_q_rx(mdev, i) {
                napi_enable(&mdev->napi[i]);
        }
        napi_enable(&mdev->tx_napi);
 
-       if (!dev->pm.enable)
+       if (!pm->ds_enable)
                mt76_connac_mcu_set_deep_sleep(&dev->mt76, false);
 
        if (hif_suspend)
                mt76_connac_mcu_set_hif_suspend(mdev, false);
 
+restore_suspend:
+       pm->suspended = false;
+
        return err;
 }
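
The reworked suspend path above follows the kernel's fall-through unwind convention: each later failure jumps to a label that rolls back progressively more setup, and later labels fall through into earlier ones. A minimal sketch of the shape, with hypothetical step names rather than the driver's functions:

    #include <stdbool.h>

    struct dev_state { bool suspended; };

    /* Hypothetical suspend steps; each returns 0 on success. */
    static int quiesce_firmware(struct dev_state *st) { return 0; }
    static void stop_queues(struct dev_state *st) { }
    static void start_queues(struct dev_state *st) { }
    static int enter_low_power(struct dev_state *st) { return 0; }

    static int do_suspend(struct dev_state *st)
    {
            int err;

            st->suspended = true;           /* step 1: mark state */

            err = quiesce_firmware(st);     /* step 2 */
            if (err)
                    goto restore_suspend;

            stop_queues(st);                /* step 3 */

            err = enter_low_power(st);      /* step 4 */
            if (err)
                    goto restore_queues;

            return 0;

    restore_queues:
            start_queues(st);               /* undo step 3 */
    restore_suspend:
            st->suspended = false;          /* undo step 1; falls through */
            return err;
    }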
 
@@ -259,8 +270,10 @@ static int mt7921_pci_resume(struct pci_dev *pdev)
 {
        struct mt76_dev *mdev = pci_get_drvdata(pdev);
        struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
+       struct mt76_connac_pm *pm = &dev->pm;
        int i, err;
 
+       pm->suspended = false;
        err = pci_set_power_state(pdev, PCI_D0);
        if (err)
                return err;
@@ -291,7 +304,8 @@ static int mt7921_pci_resume(struct pci_dev *pdev)
        napi_enable(&mdev->tx_napi);
        napi_schedule(&mdev->tx_napi);
 
-       if (!dev->pm.enable)
+       /* restore the previous deep-sleep setting */
+       if (!pm->ds_enable)
                mt76_connac_mcu_set_deep_sleep(&dev->mt76, false);
 
        if (!test_bit(MT76_STATE_SUSPEND, &dev->mphy.state))
index a18d289..783a156 100644 (file)
@@ -184,9 +184,6 @@ static int mt76s_process_tx_queue(struct mt76_dev *dev, struct mt76_queue *q)
        if (!q->queued)
                wake_up(&dev->tx_wait);
 
-       if (!mcu)
-               mt76_txq_schedule(&dev->phy, q->qid);
-
        return nframes;
 }
 
@@ -195,19 +192,28 @@ static void mt76s_status_worker(struct mt76_worker *w)
        struct mt76_sdio *sdio = container_of(w, struct mt76_sdio,
                                              status_worker);
        struct mt76_dev *dev = container_of(sdio, struct mt76_dev, sdio);
+       bool resched = false;
        int i, nframes;
 
        do {
+               int ndata_frames = 0;
+
                nframes = mt76s_process_tx_queue(dev, dev->q_mcu[MT_MCUQ_WM]);
 
                for (i = 0; i <= MT_TXQ_PSD; i++)
-                       nframes += mt76s_process_tx_queue(dev,
-                                                         dev->phy.q_tx[i]);
+                       ndata_frames += mt76s_process_tx_queue(dev,
+                                                              dev->phy.q_tx[i]);
+               nframes += ndata_frames;
+               if (ndata_frames > 0)
+                       resched = true;
 
                if (dev->drv->tx_status_data &&
                    !test_and_set_bit(MT76_READING_STATS, &dev->phy.state))
                        queue_work(dev->wq, &dev->sdio.stat_work);
        } while (nframes > 0);
+
+       if (resched)
+               mt76_worker_schedule(&dev->sdio.txrx_worker);
 }
 
 static void mt76s_tx_status_data(struct work_struct *work)
@@ -256,6 +262,7 @@ mt76s_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
 
        q->entry[q->head].skb = tx_info.skb;
        q->entry[q->head].buf_sz = len;
+       q->entry[q->head].wcid = 0xffff;
 
        smp_wmb();
 
index 001d0ba..f73ffbd 100644 (file)
@@ -88,17 +88,8 @@ static void
 mt76_testmode_free_skb(struct mt76_phy *phy)
 {
        struct mt76_testmode_data *td = &phy->test;
-       struct sk_buff *skb = td->tx_skb;
-
-       if (!skb)
-               return;
 
-       if (skb_has_frag_list(skb)) {
-               kfree_skb_list(skb_shinfo(skb)->frag_list);
-               skb_shinfo(skb)->frag_list = NULL;
-       }
-
-       dev_kfree_skb(skb);
+       dev_kfree_skb(td->tx_skb);
        td->tx_skb = NULL;
 }
 
@@ -158,19 +149,18 @@ int mt76_testmode_alloc_skb(struct mt76_phy *phy, u32 len)
                        frag_len = MT_TXP_MAX_LEN;
 
                frag = alloc_skb(frag_len, GFP_KERNEL);
-               if (!frag)
+               if (!frag) {
+                       mt76_testmode_free_skb(phy);
+                       dev_kfree_skb(head);
                        return -ENOMEM;
+               }
 
                __skb_put_zero(frag, frag_len);
                head->len += frag->len;
                head->data_len += frag->len;
 
-               if (*frag_tail) {
-                       (*frag_tail)->next = frag;
-                       frag_tail = &frag;
-               } else {
-                       *frag_tail = frag;
-               }
+               *frag_tail = frag;
+               frag_tail = &(*frag_tail)->next;
        }
 
        mt76_testmode_free_skb(phy);
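
The rewritten fragment loop replaces the branchy head/non-head append with the standard tail-pointer idiom: frag_tail always points at the slot where the next fragment must be stored, so no empty-list special case is needed. The same idiom in a stand-alone sketch (hypothetical node type, not driver code):

    #include <stdlib.h>

    struct node {
            struct node *next;
            int val;
    };

    /* Build a singly linked list with a tail pointer: *tail is always
     * the location the next node must be stored into, so appending is
     * branch-free with respect to list emptiness.
     */
    static struct node *build_list(int n)
    {
            struct node *head = NULL, **tail = &head;
            int i;

            for (i = 0; i < n; i++) {
                    struct node *nd = malloc(sizeof(*nd));

                    if (!nd)
                            break;
                    nd->val = i;
                    nd->next = NULL;
                    *tail = nd;
                    tail = &nd->next;
            }
            return head;
    }
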
@@ -531,6 +521,14 @@ mt76_testmode_dump_stats(struct mt76_phy *phy, struct sk_buff *msg)
        u64 rx_fcs_error = 0;
        int i;
 
+       if (dev->test_ops->dump_stats) {
+               int ret;
+
+               ret = dev->test_ops->dump_stats(phy, msg);
+               if (ret)
+                       return ret;
+       }
+
        for (i = 0; i < ARRAY_SIZE(td->rx_stats.packets); i++) {
                rx_packets += td->rx_stats.packets[i];
                rx_fcs_error += td->rx_stats.fcs_error[i];
@@ -545,9 +543,6 @@ mt76_testmode_dump_stats(struct mt76_phy *phy, struct sk_buff *msg)
                              MT76_TM_STATS_ATTR_PAD))
                return -EMSGSIZE;
 
-       if (dev->test_ops->dump_stats)
-               return dev->test_ops->dump_stats(phy, msg);
-
        return 0;
 }
 
index 53ea8de..f0f7a91 100644 (file)
@@ -54,11 +54,23 @@ mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list)
 
        spin_unlock_bh(&dev->status_list.lock);
 
+       rcu_read_lock();
        while ((skb = __skb_dequeue(list)) != NULL) {
+               struct ieee80211_tx_status status = {
+                       .skb = skb,
+                       .info = IEEE80211_SKB_CB(skb),
+               };
+               struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
+               struct mt76_wcid *wcid;
+
+               wcid = rcu_dereference(dev->wcid[cb->wcid]);
+               if (wcid)
+                       status.sta = wcid_to_sta(wcid);
+
                hw = mt76_tx_status_get_hw(dev, skb);
-               ieee80211_tx_status(hw, skb);
+               ieee80211_tx_status_ext(hw, &status);
        }
-
+       rcu_read_unlock();
 }
 EXPORT_SYMBOL_GPL(mt76_tx_status_unlock);
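
With the switch to ieee80211_tx_status_ext(), the loop above resolves the transmitting station from the wcid index stashed in the skb's cb, and it must do so under rcu_read_lock() because the entry may be freed concurrently. The read-side pattern in isolation (hypothetical table and type; a sketch, not the mac80211 API):

    #include <linux/rcupdate.h>
    #include <linux/printk.h>

    struct obj { int id; };

    static struct obj __rcu *table[64];

    /* Read-side lookup: the dereferenced pointer is only valid inside
     * the RCU read-side critical section, so every use of it must
     * finish before rcu_read_unlock().
     */
    static void use_entry(int idx)
    {
            struct obj *o;

            rcu_read_lock();
            o = rcu_dereference(table[idx]);        /* NULL if removed */
            if (o)
                    pr_info("obj id=%d\n", o->id);
            rcu_read_unlock();
    }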
 
@@ -80,7 +92,7 @@ __mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb, u8 flags,
 
        /* Tx status can be unreliable. If it fails, mark the frame as ACKed */
        if (flags & MT_TX_CB_TXS_FAILED) {
-               ieee80211_tx_info_clear_status(info);
+               info->status.rates[0].count = 0;
                info->status.rates[0].idx = -1;
                info->flags |= IEEE80211_TX_STAT_ACK;
        }
@@ -117,12 +129,7 @@ mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid,
        spin_lock_bh(&dev->status_list.lock);
 
        memset(cb, 0, sizeof(*cb));
-       wcid->packet_id = (wcid->packet_id + 1) & MT_PACKET_ID_MASK;
-       if (wcid->packet_id == MT_PACKET_ID_NO_ACK ||
-           wcid->packet_id == MT_PACKET_ID_NO_SKB)
-               wcid->packet_id = MT_PACKET_ID_FIRST;
-
-       pid = wcid->packet_id;
+       pid = mt76_get_next_pkt_id(wcid);
        cb->wcid = wcid->idx;
        cb->pktid = pid;
        cb->jiffies = jiffies;
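
The packet-id wrap logic that used to live here moves into the mt76_get_next_pkt_id() helper. Judging from the deleted lines, the helper plausibly looks like the following; treat the exact form as a reconstruction, not a quote from the tree:

    static inline u8 mt76_get_next_pkt_id(struct mt76_wcid *wcid)
    {
            /* Advance the per-WCID packet id, skipping the reserved
             * NO_ACK/NO_SKB sentinel values and wrapping via the mask.
             */
            wcid->packet_id = (wcid->packet_id + 1) & MT_PACKET_ID_MASK;
            if (wcid->packet_id == MT_PACKET_ID_NO_ACK ||
                wcid->packet_id == MT_PACKET_ID_NO_SKB)
                    wcid->packet_id = MT_PACKET_ID_FIRST;

            return wcid->packet_id;
    }
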
@@ -173,36 +180,37 @@ mt76_tx_status_check(struct mt76_dev *dev, struct mt76_wcid *wcid, bool flush)
 EXPORT_SYMBOL_GPL(mt76_tx_status_check);
 
 static void
-mt76_tx_check_non_aql(struct mt76_dev *dev, u16 wcid_idx, struct sk_buff *skb)
+mt76_tx_check_non_aql(struct mt76_dev *dev, struct mt76_wcid *wcid,
+                     struct sk_buff *skb)
 {
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-       struct mt76_wcid *wcid;
        int pending;
 
-       if (info->tx_time_est)
-               return;
-
-       if (wcid_idx >= ARRAY_SIZE(dev->wcid))
+       if (!wcid || info->tx_time_est)
                return;
 
-       rcu_read_lock();
-
-       wcid = rcu_dereference(dev->wcid[wcid_idx]);
-       if (wcid) {
-               pending = atomic_dec_return(&wcid->non_aql_packets);
-               if (pending < 0)
-                       atomic_cmpxchg(&wcid->non_aql_packets, pending, 0);
-       }
-
-       rcu_read_unlock();
+       pending = atomic_dec_return(&wcid->non_aql_packets);
+       if (pending < 0)
+               atomic_cmpxchg(&wcid->non_aql_packets, pending, 0);
 }
 
-void mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid_idx, struct sk_buff *skb)
+void __mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid_idx, struct sk_buff *skb,
+                           struct list_head *free_list)
 {
+       struct ieee80211_tx_status status = {
+               .skb = skb,
+               .free_list = free_list,
+       };
+       struct mt76_wcid *wcid = NULL;
        struct ieee80211_hw *hw;
        struct sk_buff_head list;
 
-       mt76_tx_check_non_aql(dev, wcid_idx, skb);
+       rcu_read_lock();
+
+       if (wcid_idx < ARRAY_SIZE(dev->wcid))
+               wcid = rcu_dereference(dev->wcid[wcid_idx]);
+
+       mt76_tx_check_non_aql(dev, wcid, skb);
 
 #ifdef CONFIG_NL80211_TESTMODE
        if (mt76_is_testmode_skb(dev, skb, &hw)) {
@@ -214,21 +222,25 @@ void mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid_idx, struct sk_buff *sk
                        wake_up(&dev->tx_wait);
 
                dev_kfree_skb_any(skb);
-               return;
+               goto out;
        }
 #endif
 
        if (!skb->prev) {
                hw = mt76_tx_status_get_hw(dev, skb);
-               ieee80211_free_txskb(hw, skb);
-               return;
+               status.sta = wcid_to_sta(wcid);
+               ieee80211_tx_status_ext(hw, &status);
+               goto out;
        }
 
        mt76_tx_status_lock(dev, &list);
        __mt76_tx_status_skb_done(dev, skb, MT_TX_CB_DMA_DONE, &list);
        mt76_tx_status_unlock(dev, &list);
+
+out:
+       rcu_read_unlock();
 }
-EXPORT_SYMBOL_GPL(mt76_tx_complete_skb);
+EXPORT_SYMBOL_GPL(__mt76_tx_complete_skb);
 
 static int
 __mt76_tx_queue_skb(struct mt76_phy *phy, int qid, struct sk_buff *skb,
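
mt76_tx_check_non_aql() above decrements a counter that must not rest below zero and repairs it with a compare-and-swap when a race drives it negative, avoiding any lock on the hot path. The same clamp expressed with portable C11 atomics (illustrative; the kernel uses its own atomic_t API):

    #include <stdatomic.h>

    /* Decrement a counter that should never stay below zero. If our
     * decrement took it negative, try to CAS it back to 0; losing the
     * CAS just means another thread already repaired the value.
     */
    static void dec_clamped(atomic_int *cnt)
    {
            int pending = atomic_fetch_sub(cnt, 1) - 1;

            if (pending < 0)
                    atomic_compare_exchange_strong(cnt, &pending, 0);
    }
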
@@ -244,11 +256,15 @@ __mt76_tx_queue_skb(struct mt76_phy *phy, int qid, struct sk_buff *skb,
 
        non_aql = !info->tx_time_est;
        idx = dev->queue_ops->tx_queue_skb(dev, q, skb, wcid, sta);
-       if (idx < 0 || !sta || !non_aql)
+       if (idx < 0 || !sta)
                return idx;
 
        wcid = (struct mt76_wcid *)sta->drv_priv;
        q->entry[idx].wcid = wcid->idx;
+
+       if (!non_aql)
+               return idx;
+
        pending = atomic_inc_return(&wcid->non_aql_packets);
        if (stop && pending >= MT_MAX_NON_AQL_PKT)
                *stop = true;
@@ -285,7 +301,7 @@ mt76_tx(struct mt76_phy *phy, struct ieee80211_sta *sta,
                skb_set_queue_mapping(skb, qid);
        }
 
-       if (!(wcid->tx_info & MT_WCID_TX_INFO_SET))
+       if (wcid && !(wcid->tx_info & MT_WCID_TX_INFO_SET))
                ieee80211_get_tx_rates(info->control.vif, sta, skb,
                                       info->control.rates, 1);
 
index 30bc54e..1e9f60b 100644 (file)
@@ -925,6 +925,7 @@ mt76u_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
 
        q->head = (q->head + 1) % q->ndesc;
        q->entry[idx].skb = tx_info.skb;
+       q->entry[idx].wcid = 0xffff;
        q->queued++;
 
        return idx;
index 6bcc4a1..cc77204 100644 (file)
@@ -26,6 +26,7 @@ static const struct usb_device_id mt7601u_device_table[] = {
        { USB_DEVICE(0x2717, 0x4106) },
        { USB_DEVICE(0x2955, 0x0001) },
        { USB_DEVICE(0x2955, 0x1001) },
+       { USB_DEVICE(0x2955, 0x1003) },
        { USB_DEVICE(0x2a5f, 0x1000) },
        { USB_DEVICE(0x7392, 0x7710) },
        { 0, }
index d1a566c..0173577 100644 (file)
@@ -853,15 +853,10 @@ struct rtl8192eu_efuse {
        u8 usb_optional_function;
        u8 res9[2];
        u8 mac_addr[ETH_ALEN];          /* 0xd7 */
-       u8 res10[2];
-       u8 vendor_name[7];
-       u8 res11[2];
-       u8 device_name[0x0b];           /* 0xe8 */
-       u8 res12[2];
-       u8 serial[0x0b];                /* 0xf5 */
-       u8 res13[0x30];
+       u8 device_info[80];
+       u8 res11[3];
        u8 unknown[0x0d];               /* 0x130 */
-       u8 res14[0xc3];
+       u8 res12[0xc3];
 };
 
 struct rtl8xxxu_reg8val {
index cfe2dfd..b06508d 100644 (file)
@@ -554,9 +554,43 @@ rtl8192e_set_tx_power(struct rtl8xxxu_priv *priv, int channel, bool ht40)
        }
 }
 
+static void rtl8192eu_log_next_device_info(struct rtl8xxxu_priv *priv,
+                                          char *record_name,
+                                          char *device_info,
+                                          unsigned int *record_offset)
+{
+       char *record = device_info + *record_offset;
+
+       /* A record is [ total length | 0x03 | value ] */
+       unsigned char l = record[0];
+
+       /*
+        * The whole device info section seems to be 80 bytes; make sure
+        * we don't read past its end.
+        */
+       if (*record_offset + l > 80) {
+               dev_warn(&priv->udev->dev,
+                        "invalid record length %d while parsing \"%s\" at offset %u.\n",
+                        l, record_name, *record_offset);
+               return;
+       }
+
+       if (l >= 2) {
+               char value[80];
+
+               memcpy(value, &record[2], l - 2);
+               value[l - 2] = '\0';
+               dev_info(&priv->udev->dev, "%s: %s\n", record_name, value);
+               *record_offset = *record_offset + l;
+       } else {
+               dev_info(&priv->udev->dev, "%s not available.\n", record_name);
+       }
+}
+
 static int rtl8192eu_parse_efuse(struct rtl8xxxu_priv *priv)
 {
        struct rtl8192eu_efuse *efuse = &priv->efuse_wifi.efuse8192eu;
+       unsigned int record_offset;
        int i;
 
        if (efuse->rtl_id != cpu_to_le16(0x8129))
@@ -604,12 +638,25 @@ static int rtl8192eu_parse_efuse(struct rtl8xxxu_priv *priv)
        priv->has_xtalk = 1;
        priv->xtalk = priv->efuse_wifi.efuse8192eu.xtal_k & 0x3f;
 
-       dev_info(&priv->udev->dev, "Vendor: %.7s\n", efuse->vendor_name);
-       dev_info(&priv->udev->dev, "Product: %.11s\n", efuse->device_name);
-       if (memchr_inv(efuse->serial, 0xff, 11))
-               dev_info(&priv->udev->dev, "Serial: %.11s\n", efuse->serial);
-       else
-               dev_info(&priv->udev->dev, "Serial not available.\n");
+       /*
+        * device_info section seems to be laid out as records
+        * [ total length | 0x03 | value ] so:
+        * - vendor length + 2
+        * - 0x03
+        * - vendor string (not null terminated)
+        * - product length + 2
+        * - 0x03
+        * - product string (not null terminated)
+        * Then there are one or two 0x00 bytes on all four devices I own
+        * or have found dumped online.
+        * As a previous version of the code handled an optional serial
+        * string, I assume there may be a third record if the length
+        * is not 0.
+        */
+       record_offset = 0;
+       rtl8192eu_log_next_device_info(priv, "Vendor", efuse->device_info, &record_offset);
+       rtl8192eu_log_next_device_info(priv, "Product", efuse->device_info, &record_offset);
+       rtl8192eu_log_next_device_info(priv, "Serial", efuse->device_info, &record_offset);
 
        if (rtl8xxxu_debug & RTL8XXXU_DEBUG_EFUSE) {
                unsigned char *raw = priv->efuse_wifi.raw;
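
Given the [ total length | 0x03 | value ] layout described in the comment, the three rtl8192eu_log_next_device_info() calls simply consume consecutive records. A stand-alone walk over such a blob could look like this (hedged sketch over a hypothetical buffer):

    #include <stdio.h>
    #include <string.h>

    #define DEVICE_INFO_LEN 80

    /* Walk [len | 0x03 | value] records; a length under 2, or one
     * that would run past the section, ends the walk.
     */
    static void dump_records(const unsigned char *info)
    {
            unsigned int off = 0;

            while (off < DEVICE_INFO_LEN) {
                    unsigned char len = info[off];
                    char value[DEVICE_INFO_LEN];

                    if (len < 2 || off + len > DEVICE_INFO_LEN)
                            break;
                    memcpy(value, &info[off + 2], len - 2);
                    value[len - 2] = '\0';
                    printf("record: %s\n", value);
                    off += len;
            }
    }
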
index 9ff09cf..ac1061c 100644 (file)
@@ -5554,6 +5554,11 @@ int rtl8xxxu_parse_rxdesc16(struct rtl8xxxu_priv *priv, struct sk_buff *skb)
        urb_len = skb->len;
        pkt_cnt = 0;
 
+       if (urb_len < sizeof(struct rtl8xxxu_rxdesc16)) {
+               kfree_skb(skb);
+               return RX_TYPE_ERROR;
+       }
+
        do {
                rx_desc = (struct rtl8xxxu_rxdesc16 *)skb->data;
                _rx_desc_le = (__le32 *)skb->data;
@@ -5581,7 +5586,7 @@ int rtl8xxxu_parse_rxdesc16(struct rtl8xxxu_priv *priv, struct sk_buff *skb)
                 * at least cover the rx descriptor
                 */
                if (pkt_cnt > 1 &&
-                   urb_len > (pkt_offset + sizeof(struct rtl8xxxu_rxdesc16)))
+                   urb_len >= (pkt_offset + sizeof(struct rtl8xxxu_rxdesc16)))
                        next_skb = skb_clone(skb, GFP_ATOMIC);
 
                rx_status = IEEE80211_SKB_RXCB(skb);
@@ -5627,7 +5632,9 @@ int rtl8xxxu_parse_rxdesc16(struct rtl8xxxu_priv *priv, struct sk_buff *skb)
 
                pkt_cnt--;
                urb_len -= pkt_offset;
-       } while (skb && urb_len > 0 && pkt_cnt > 0);
+               next_skb = NULL;
+       } while (skb && pkt_cnt > 0 &&
+                urb_len >= sizeof(struct rtl8xxxu_rxdesc16));
 
        return RX_TYPE_DATA_PKT;
 }
index 68ec009..76dd881 100644 (file)
@@ -2574,7 +2574,7 @@ static void _rtl92d_phy_lc_calibrate_sw(struct ieee80211_hw *hw, bool is2t)
                        RTPRINT(rtlpriv, FINIT, INIT_IQK,
                                "path-B / 2.4G LCK\n");
                }
-               memset(&curvecount_val[0], 0, CV_CURVE_CNT * 2);
+               memset(curvecount_val, 0, sizeof(curvecount_val));
                /* Set LC calibration off */
                rtl_set_rfreg(hw, (enum radio_path)index, RF_CHNLBW,
                              0x08000, 0x0);
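
The memset() fix above is the usual sizeof correction: CV_CURVE_CNT * 2 counts elements, not bytes, so the old call under-cleared a wider element type. For a true array, sizeof(arr) always yields the full byte size, as in this sketch (CV_CURVE_CNT value and u32 element type assumed for illustration):

    #include <linux/string.h>
    #include <linux/types.h>

    #define CV_CURVE_CNT 64                 /* value assumed */

    static u32 curvecount_val[CV_CURVE_CNT * 2];

    /* sizeof(curvecount_val) is CV_CURVE_CNT * 2 * sizeof(u32) bytes;
     * the old call cleared only CV_CURVE_CNT * 2 bytes, a quarter of
     * the array for a u32 element type.
     */
    static void clear_curves(void)
    {
            memset(curvecount_val, 0, sizeof(curvecount_val));
    }
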
index cedbf38..2551e22 100644 (file)
@@ -591,8 +591,10 @@ void rtw_coex_info_response(struct rtw_dev *rtwdev, struct sk_buff *skb)
        struct rtw_coex *coex = &rtwdev->coex;
        u8 *payload = get_payload_from_coex_resp(skb);
 
-       if (payload[0] != COEX_RESP_ACK_BY_WL_FW)
+       if (payload[0] != COEX_RESP_ACK_BY_WL_FW) {
+               dev_kfree_skb_any(skb);
                return;
+       }
 
        skb_queue_tail(&coex->queue, skb);
        wake_up(&coex->wait);
@@ -630,20 +632,16 @@ static bool rtw_coex_get_bt_scan_type(struct rtw_dev *rtwdev, u8 *scan_type)
        struct rtw_coex_info_req req = {0};
        struct sk_buff *skb;
        u8 *payload;
-       bool ret = false;
 
        req.op_code = BT_MP_INFO_OP_SCAN_TYPE;
        skb = rtw_coex_info_request(rtwdev, &req);
        if (!skb)
-               goto out;
+               return false;
 
        payload = get_payload_from_coex_resp(skb);
        *scan_type = GET_COEX_RESP_BT_SCAN_TYPE(payload);
        dev_kfree_skb_any(skb);
-       ret = true;
-
-out:
-       return ret;
+       return true;
 }
 
 static bool rtw_coex_set_lna_constrain_level(struct rtw_dev *rtwdev,
@@ -651,19 +649,15 @@ static bool rtw_coex_set_lna_constrain_level(struct rtw_dev *rtwdev,
 {
        struct rtw_coex_info_req req = {0};
        struct sk_buff *skb;
-       bool ret = false;
 
        req.op_code = BT_MP_INFO_OP_LNA_CONSTRAINT;
        req.para1 = lna_constrain_level;
        skb = rtw_coex_info_request(rtwdev, &req);
        if (!skb)
-               goto out;
+               return false;
 
        dev_kfree_skb_any(skb);
-       ret = true;
-
-out:
-       return ret;
+       return true;
 }
 
 #define case_BTSTATUS(src) \
@@ -3523,6 +3517,7 @@ static bool rtw_coex_get_bt_reg(struct rtw_dev *rtwdev,
 
        payload = get_payload_from_coex_resp(skb);
        *val = GET_COEX_RESP_BT_REG_VAL(payload);
+       dev_kfree_skb_any(skb);
 
        return true;
 }
@@ -3533,19 +3528,17 @@ static bool rtw_coex_get_bt_patch_version(struct rtw_dev *rtwdev,
        struct rtw_coex_info_req req = {0};
        struct sk_buff *skb;
        u8 *payload;
-       bool ret = false;
 
        req.op_code = BT_MP_INFO_OP_PATCH_VER;
        skb = rtw_coex_info_request(rtwdev, &req);
        if (!skb)
-               goto out;
+               return false;
 
        payload = get_payload_from_coex_resp(skb);
        *patch_version = GET_COEX_RESP_BT_PATCH_VER(payload);
-       ret = true;
+       dev_kfree_skb_any(skb);
 
-out:
-       return ret;
+       return true;
 }
 
 static bool rtw_coex_get_bt_supported_version(struct rtw_dev *rtwdev,
@@ -3554,19 +3547,17 @@ static bool rtw_coex_get_bt_supported_version(struct rtw_dev *rtwdev,
        struct rtw_coex_info_req req = {0};
        struct sk_buff *skb;
        u8 *payload;
-       bool ret = false;
 
        req.op_code = BT_MP_INFO_OP_SUPP_VER;
        skb = rtw_coex_info_request(rtwdev, &req);
        if (!skb)
-               goto out;
+               return false;
 
        payload = get_payload_from_coex_resp(skb);
        *supported_version = GET_COEX_RESP_BT_SUPP_VER(payload);
-       ret = true;
+       dev_kfree_skb_any(skb);
 
-out:
-       return ret;
+       return true;
 }
 
 static bool rtw_coex_get_bt_supported_feature(struct rtw_dev *rtwdev,
@@ -3575,19 +3566,17 @@ static bool rtw_coex_get_bt_supported_feature(struct rtw_dev *rtwdev,
        struct rtw_coex_info_req req = {0};
        struct sk_buff *skb;
        u8 *payload;
-       bool ret = false;
 
        req.op_code = BT_MP_INFO_OP_SUPP_FEAT;
        skb = rtw_coex_info_request(rtwdev, &req);
        if (!skb)
-               goto out;
+               return false;
 
        payload = get_payload_from_coex_resp(skb);
        *supported_feature = GET_COEX_RESP_BT_SUPP_FEAT(payload);
-       ret = true;
+       dev_kfree_skb_any(skb);
 
-out:
-       return ret;
+       return true;
 }
 
 struct rtw_coex_sta_stat_iter_data {
index 18ab472..dfd52cf 100644 (file)
@@ -11,6 +11,7 @@
 #include "debug.h"
 #include "phy.h"
 #include "reg.h"
+#include "ps.h"
 
 #ifdef CONFIG_RTW88_DEBUGFS
 
@@ -847,7 +848,13 @@ static ssize_t rtw_debugfs_set_fw_crash(struct file *filp,
        if (!input)
                return -EINVAL;
 
+       if (test_bit(RTW_FLAG_RESTARTING, rtwdev->flags))
+               return -EINPROGRESS;
+
+       mutex_lock(&rtwdev->mutex);
+       rtw_leave_lps_deep(rtwdev);
        rtw_write8(rtwdev, REG_HRCV_MSG, 1);
+       mutex_unlock(&rtwdev->mutex);
 
        return count;
 }
index c8efd19..0dd3f9a 100644 (file)
@@ -20,6 +20,7 @@ enum rtw_debug_mask {
        RTW_DBG_BF              = 0x00000800,
        RTW_DBG_WOW             = 0x00001000,
        RTW_DBG_CFO             = 0x00002000,
+       RTW_DBG_PATH_DIV        = 0x00004000,
 
        RTW_DBG_ALL             = 0xffffffff
 };
index ea2cd4d..3bfa5ec 100644 (file)
@@ -127,6 +127,62 @@ static void rtw_fw_ra_report_handle(struct rtw_dev *rtwdev, u8 *payload,
        rtw_iterate_stas_atomic(rtwdev, rtw_fw_ra_report_iter, &ra_data);
 }
 
+struct rtw_beacon_filter_iter_data {
+       struct rtw_dev *rtwdev;
+       u8 *payload;
+};
+
+static void rtw_fw_bcn_filter_notify_vif_iter(void *data, u8 *mac,
+                                             struct ieee80211_vif *vif)
+{
+       struct rtw_beacon_filter_iter_data *iter_data = data;
+       struct rtw_dev *rtwdev = iter_data->rtwdev;
+       u8 *payload = iter_data->payload;
+       u8 type = GET_BCN_FILTER_NOTIFY_TYPE(payload);
+       u8 event = GET_BCN_FILTER_NOTIFY_EVENT(payload);
+       s8 sig = (s8)GET_BCN_FILTER_NOTIFY_RSSI(payload);
+
+       switch (type) {
+       case BCN_FILTER_NOTIFY_SIGNAL_CHANGE:
+               event = event ? NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH :
+                       NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW;
+               ieee80211_cqm_rssi_notify(vif, event, sig, GFP_KERNEL);
+               break;
+       case BCN_FILTER_CONNECTION_LOSS:
+               ieee80211_connection_loss(vif);
+               break;
+       case BCN_FILTER_CONNECTED:
+               rtwdev->beacon_loss = false;
+               break;
+       case BCN_FILTER_NOTIFY_BEACON_LOSS:
+               rtwdev->beacon_loss = true;
+               rtw_leave_lps(rtwdev);
+               break;
+       }
+}
+
+static void rtw_fw_bcn_filter_notify(struct rtw_dev *rtwdev, u8 *payload,
+                                    u8 length)
+{
+       struct rtw_beacon_filter_iter_data dev_iter_data;
+
+       dev_iter_data.rtwdev = rtwdev;
+       dev_iter_data.payload = payload;
+       rtw_iterate_vifs(rtwdev, rtw_fw_bcn_filter_notify_vif_iter,
+                        &dev_iter_data);
+}
+
+static void rtw_fw_scan_result(struct rtw_dev *rtwdev, u8 *payload,
+                              u8 length)
+{
+       struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+
+       dm_info->scan_density = payload[0];
+
+       rtw_dbg(rtwdev, RTW_DBG_FW, "scan.density = %x\n",
+               dm_info->scan_density);
+}
+
 void rtw_fw_c2h_cmd_handle(struct rtw_dev *rtwdev, struct sk_buff *skb)
 {
        struct rtw_c2h_cmd *c2h;
@@ -152,6 +208,9 @@ void rtw_fw_c2h_cmd_handle(struct rtw_dev *rtwdev, struct sk_buff *skb)
        case C2H_WLAN_INFO:
                rtw_coex_wl_fwdbginfo_notify(rtwdev, c2h->payload, len);
                break;
+       case C2H_BCN_FILTER_NOTIFY:
+               rtw_fw_bcn_filter_notify(rtwdev, c2h->payload, len);
+               break;
        case C2H_HALMAC:
                rtw_fw_c2h_cmd_handle_ext(rtwdev, skb);
                break;
@@ -186,6 +245,12 @@ void rtw_fw_c2h_cmd_rx_irqsafe(struct rtw_dev *rtwdev, u32 pkt_offset,
                break;
        case C2H_WLAN_RFON:
                complete(&rtwdev->lps_leave_check);
+               dev_kfree_skb_any(skb);
+               break;
+       case C2H_SCAN_RESULT:
+               complete(&rtwdev->fw_scan_density);
+               rtw_fw_scan_result(rtwdev, c2h->payload, len);
+               dev_kfree_skb_any(skb);
                break;
        default:
                /* pass offset for further operation */
@@ -527,6 +592,45 @@ void rtw_fw_update_wl_phy_info(struct rtw_dev *rtwdev)
        rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
 }
 
+void rtw_fw_beacon_filter_config(struct rtw_dev *rtwdev, bool connect,
+                                struct ieee80211_vif *vif)
+{
+       struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
+       struct ieee80211_sta *sta = ieee80211_find_sta(vif, bss_conf->bssid);
+       static const u8 rssi_min = 0, rssi_max = 100, rssi_offset = 100;
+       struct rtw_sta_info *si =
+               sta ? (struct rtw_sta_info *)sta->drv_priv : NULL;
+       s32 threshold = bss_conf->cqm_rssi_thold + rssi_offset;
+       u8 h2c_pkt[H2C_PKT_SIZE] = {0};
+
+       if (!rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_BCN_FILTER) || !si)
+               return;
+
+       if (!connect) {
+               SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_BCN_FILTER_OFFLOAD_P1);
+               SET_BCN_FILTER_OFFLOAD_P1_ENABLE(h2c_pkt, connect);
+               rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
+
+               return;
+       }
+       SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_BCN_FILTER_OFFLOAD_P0);
+       ether_addr_copy(&h2c_pkt[1], bss_conf->bssid);
+       rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
+
+       memset(h2c_pkt, 0, sizeof(h2c_pkt));
+       threshold = clamp_t(s32, threshold, rssi_min, rssi_max);
+       SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_BCN_FILTER_OFFLOAD_P1);
+       SET_BCN_FILTER_OFFLOAD_P1_ENABLE(h2c_pkt, connect);
+       SET_BCN_FILTER_OFFLOAD_P1_OFFLOAD_MODE(h2c_pkt,
+                                              BCN_FILTER_OFFLOAD_MODE_DEFAULT);
+       SET_BCN_FILTER_OFFLOAD_P1_THRESHOLD(h2c_pkt, (u8)threshold);
+       SET_BCN_FILTER_OFFLOAD_P1_BCN_LOSS_CNT(h2c_pkt, BCN_LOSS_CNT);
+       SET_BCN_FILTER_OFFLOAD_P1_MACID(h2c_pkt, si->mac_id);
+       SET_BCN_FILTER_OFFLOAD_P1_HYST(h2c_pkt, bss_conf->cqm_rssi_hyst);
+       SET_BCN_FILTER_OFFLOAD_P1_BCN_INTERVAL(h2c_pkt, bss_conf->beacon_int);
+       rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
+}
+
 void rtw_fw_set_pwr_mode(struct rtw_dev *rtwdev)
 {
        struct rtw_lps_conf *conf = &rtwdev->lps_conf;
@@ -1613,3 +1717,13 @@ void rtw_fw_channel_switch(struct rtw_dev *rtwdev, bool enable)
 
        rtw_fw_send_h2c_packet(rtwdev, h2c_pkt);
 }
+
+void rtw_fw_scan_notify(struct rtw_dev *rtwdev, bool start)
+{
+       u8 h2c_pkt[H2C_PKT_SIZE] = {0};
+
+       SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_SCAN);
+       SET_SCAN_START(h2c_pkt, start);
+
+       rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
+}
index 7c5b1d7..a8a7162 100644 (file)
 #define DLFW_BLK_SIZE_LEGACY           4
 #define FW_START_ADDR_LEGACY           0x1000
 
+#define BCN_LOSS_CNT                   10
+#define BCN_FILTER_NOTIFY_SIGNAL_CHANGE        0
+#define BCN_FILTER_CONNECTION_LOSS     1
+#define BCN_FILTER_CONNECTED           2
+#define BCN_FILTER_NOTIFY_BEACON_LOSS  3
+
+#define SCAN_NOTIFY_TIMEOUT  msecs_to_jiffies(10)
+
 enum rtw_c2h_cmd_id {
        C2H_CCX_TX_RPT = 0x03,
        C2H_BT_INFO = 0x09,
@@ -32,6 +40,8 @@ enum rtw_c2h_cmd_id {
        C2H_HW_FEATURE_REPORT = 0x19,
        C2H_WLAN_INFO = 0x27,
        C2H_WLAN_RFON = 0x32,
+       C2H_BCN_FILTER_NOTIFY = 0x36,
+       C2H_SCAN_RESULT = 0x38,
        C2H_HW_FEATURE_DUMP = 0xfd,
        C2H_HALMAC = 0xff,
 };
@@ -78,9 +88,20 @@ enum rtw_fw_feature {
        FW_FEATURE_LPS_C2H = BIT(1),
        FW_FEATURE_LCLK = BIT(2),
        FW_FEATURE_PG = BIT(3),
+       FW_FEATURE_BCN_FILTER = BIT(5),
+       FW_FEATURE_NOTIFY_SCAN = BIT(6),
        FW_FEATURE_MAX = BIT(31),
 };
 
+enum rtw_beacon_filter_offload_mode {
+       BCN_FILTER_OFFLOAD_MODE_0 = 0,
+       BCN_FILTER_OFFLOAD_MODE_1,
+       BCN_FILTER_OFFLOAD_MODE_2,
+       BCN_FILTER_OFFLOAD_MODE_3,
+
+       BCN_FILTER_OFFLOAD_MODE_DEFAULT = BCN_FILTER_OFFLOAD_MODE_1,
+};
+
 struct rtw_coex_info_req {
        u8 seq;
        u8 op_code;
@@ -237,6 +258,10 @@ struct rtw_fw_hdr_legacy {
 #define GET_RA_REPORT_BW(c2h_payload)          (c2h_payload[6])
 #define GET_RA_REPORT_MACID(c2h_payload)       (c2h_payload[1])
 
+#define GET_BCN_FILTER_NOTIFY_TYPE(c2h_payload)        (c2h_payload[1] & 0xf)
+#define GET_BCN_FILTER_NOTIFY_EVENT(c2h_payload)       (c2h_payload[1] & 0x10)
+#define GET_BCN_FILTER_NOTIFY_RSSI(c2h_payload)        (c2h_payload[2] - 100)
+
 /* PKT H2C */
 #define H2C_PKT_CMD_ID 0xFF
 #define H2C_PKT_CATEGORY 0x01
@@ -345,7 +370,10 @@ static inline void rtw_h2c_pkt_set_header(u8 *h2c_pkt, u8 sub_id)
 #define H2C_CMD_LPS_PG_INFO            0x2b
 #define H2C_CMD_RA_INFO                        0x40
 #define H2C_CMD_RSSI_MONITOR           0x42
+#define H2C_CMD_BCN_FILTER_OFFLOAD_P0  0x56
+#define H2C_CMD_BCN_FILTER_OFFLOAD_P1  0x57
 #define H2C_CMD_WL_PHY_INFO            0x58
+#define H2C_CMD_SCAN                   0x59
 
 #define H2C_CMD_COEX_TDMA_TYPE         0x60
 #define H2C_CMD_QUERY_BT_INFO          0x61
@@ -381,6 +409,23 @@ static inline void rtw_h2c_pkt_set_header(u8 *h2c_pkt, u8 sub_id)
        le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(15, 8))
 #define SET_WL_PHY_INFO_RX_EVM(h2c_pkt, value)                                \
        le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(23, 16))
+#define SET_BCN_FILTER_OFFLOAD_P1_MACID(h2c_pkt, value)                               \
+       le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(15, 8))
+#define SET_BCN_FILTER_OFFLOAD_P1_ENABLE(h2c_pkt, value)                      \
+       le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(16))
+#define SET_BCN_FILTER_OFFLOAD_P1_HYST(h2c_pkt, value)                        \
+       le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(20, 17))
+#define SET_BCN_FILTER_OFFLOAD_P1_OFFLOAD_MODE(h2c_pkt, value)                \
+       le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(23, 21))
+#define SET_BCN_FILTER_OFFLOAD_P1_THRESHOLD(h2c_pkt, value)                   \
+       le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(31, 24))
+#define SET_BCN_FILTER_OFFLOAD_P1_BCN_LOSS_CNT(h2c_pkt, value)                \
+       le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(3, 0))
+#define SET_BCN_FILTER_OFFLOAD_P1_BCN_INTERVAL(h2c_pkt, value)                \
+       le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(13, 4))
+
+#define SET_SCAN_START(h2c_pkt, value)                                        \
+       le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(8))
 
 #define SET_PWR_MODE_SET_MODE(h2c_pkt, value)                                  \
        le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(14, 8))
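
All of these SET_* helpers delegate to le32p_replace_bits(), a read-modify-write of a bit field within a little-endian dword; the + 0x00 / + 0x01 offsets select which dword of the H2C payload is touched. Conceptually, one expansion does the following (simplified sketch of the GENMASK(13, 4) case, without bitfield.h's generic machinery):

    #include <linux/bits.h>
    #include <linux/types.h>
    #include <asm/byteorder.h>

    /* Equivalent of le32p_replace_bits(p, val, GENMASK(13, 4)):
     * load the LE word in CPU order, clear bits 13..4, insert the
     * new value shifted to bit 4, and store it back in LE order.
     */
    static void replace_bits_13_4(__le32 *p, u32 val)
    {
            u32 w = le32_to_cpu(*p);

            w &= ~GENMASK(13, 4);
            w |= (val << 4) & GENMASK(13, 4);
            *p = cpu_to_le32(w);
    }
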
@@ -554,6 +599,12 @@ static inline struct rtw_c2h_cmd *get_c2h_from_skb(struct sk_buff *skb)
        return (struct rtw_c2h_cmd *)(skb->data + pkt_offset);
 }
 
+static inline bool rtw_fw_feature_check(struct rtw_fw_state *fw,
+                                       enum rtw_fw_feature feature)
+{
+       return !!(fw->feature & feature);
+}
+
 void rtw_fw_c2h_cmd_rx_irqsafe(struct rtw_dev *rtwdev, u32 pkt_offset,
                               struct sk_buff *skb);
 void rtw_fw_c2h_cmd_handle(struct rtw_dev *rtwdev, struct sk_buff *skb);
@@ -577,6 +628,8 @@ void rtw_fw_send_rssi_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si);
 void rtw_fw_send_ra_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si);
 void rtw_fw_media_status_report(struct rtw_dev *rtwdev, u8 mac_id, bool conn);
 void rtw_fw_update_wl_phy_info(struct rtw_dev *rtwdev);
+void rtw_fw_beacon_filter_config(struct rtw_dev *rtwdev, bool connect,
+                                struct ieee80211_vif *vif);
 int rtw_fw_write_data_rsvd_page(struct rtw_dev *rtwdev, u16 pg_addr,
                                u8 *buf, u32 size);
 void rtw_remove_rsvd_page(struct rtw_dev *rtwdev,
@@ -607,5 +660,5 @@ void rtw_fw_h2c_cmd_dbg(struct rtw_dev *rtwdev, u8 *h2c);
 void rtw_fw_c2h_cmd_isr(struct rtw_dev *rtwdev);
 int rtw_fw_dump_fifo(struct rtw_dev *rtwdev, u8 fifo_sel, u32 addr, u32 size,
                     u32 *buffer);
-
+void rtw_fw_scan_notify(struct rtw_dev *rtwdev, bool start);
 #endif
index d8718b2..6f56298 100644 (file)
@@ -153,6 +153,9 @@ static int rtw_ops_add_interface(struct ieee80211_hw *hw,
        u8 port = 0;
        u8 bcn_ctrl = 0;
 
+       if (rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_BCN_FILTER))
+               vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
+                                    IEEE80211_VIF_SUPPORTS_CQM_RSSI;
        rtwvif->port = port;
        rtwvif->stats.tx_unicast = 0;
        rtwvif->stats.rx_unicast = 0;
@@ -399,6 +402,8 @@ static void rtw_ops_bss_info_changed(struct ieee80211_hw *hw,
                        rtw_write32_clr(rtwdev, REG_FWHW_TXQ_CTRL,
                                        BIT_EN_BCNQ_DL);
        }
+       if (changed & BSS_CHANGED_CQM)
+               rtw_fw_beacon_filter_config(rtwdev, true, vif);
 
        if (changed & BSS_CHANGED_MU_GROUPS)
                rtw_chip_set_gid_table(rtwdev, vif, conf);
@@ -450,6 +455,7 @@ static int rtw_ops_sta_remove(struct ieee80211_hw *hw,
 {
        struct rtw_dev *rtwdev = hw->priv;
 
+       rtw_fw_beacon_filter_config(rtwdev, false, vif);
        mutex_lock(&rtwdev->mutex);
        rtw_sta_remove(rtwdev, sta, true);
        mutex_unlock(&rtwdev->mutex);
@@ -599,6 +605,7 @@ static void rtw_ops_sw_scan_start(struct ieee80211_hw *hw,
        rtw_vif_port_config(rtwdev, rtwvif, config);
 
        rtw_coex_scan_notify(rtwdev, COEX_SCAN_START);
+       rtw_core_fw_scan_notify(rtwdev, true);
 
        set_bit(RTW_FLAG_DIG_DISABLE, rtwdev->flags);
        set_bit(RTW_FLAG_SCANNING, rtwdev->flags);
@@ -618,6 +625,8 @@ static void rtw_ops_sw_scan_complete(struct ieee80211_hw *hw,
        clear_bit(RTW_FLAG_SCANNING, rtwdev->flags);
        clear_bit(RTW_FLAG_DIG_DISABLE, rtwdev->flags);
 
+       rtw_core_fw_scan_notify(rtwdev, false);
+
        ether_addr_copy(rtwvif->mac_addr, vif->addr);
        config |= PORT_SET_MAC_ADDR;
        rtw_vif_port_config(rtwdev, rtwvif, config);
index f3a3a86..c636483 100644 (file)
@@ -2,6 +2,8 @@
 /* Copyright(c) 2018-2019  Realtek Corporation
  */
 
+#include <linux/devcoredump.h>
+
 #include "main.h"
 #include "regd.h"
 #include "fw.h"
@@ -239,7 +241,8 @@ static void rtw_watch_dog_work(struct work_struct *work)
         * get that vif and check if device is having traffic more than the
         * threshold.
         */
-       if (rtwdev->ps_enabled && data.rtwvif && !ps_active)
+       if (rtwdev->ps_enabled && data.rtwvif && !ps_active &&
+           !rtwdev->beacon_loss)
                rtw_enter_lps(rtwdev, data.rtwvif->port);
 
        rtwdev->watch_dog_cnt++;
@@ -292,6 +295,7 @@ int rtw_sta_add(struct rtw_dev *rtwdev, struct ieee80211_sta *sta,
        rtw_fw_media_status_report(rtwdev, si->mac_id, true);
 
        rtwdev->sta_cnt++;
+       rtwdev->beacon_loss = false;
        rtw_info(rtwdev, "sta %pM joined with macid %d\n",
                 sta->addr, si->mac_id);
 
@@ -318,59 +322,131 @@ void rtw_sta_remove(struct rtw_dev *rtwdev, struct ieee80211_sta *sta,
                 sta->addr, si->mac_id);
 }
 
-static bool rtw_fw_dump_crash_log(struct rtw_dev *rtwdev)
+struct rtw_fwcd_hdr {
+       u32 item;
+       u32 size;
+       u32 padding1;
+       u32 padding2;
+} __packed;
+
+static int rtw_fwcd_prep(struct rtw_dev *rtwdev)
+{
+       struct rtw_chip_info *chip = rtwdev->chip;
+       struct rtw_fwcd_desc *desc = &rtwdev->fw.fwcd_desc;
+       const struct rtw_fwcd_segs *segs = chip->fwcd_segs;
+       u32 prep_size = chip->fw_rxff_size + sizeof(struct rtw_fwcd_hdr);
+       u8 i;
+
+       if (segs) {
+               prep_size += segs->num * sizeof(struct rtw_fwcd_hdr);
+
+               for (i = 0; i < segs->num; i++)
+                       prep_size += segs->segs[i];
+       }
+
+       desc->data = vmalloc(prep_size);
+       if (!desc->data)
+               return -ENOMEM;
+
+       desc->size = prep_size;
+       desc->next = desc->data;
+
+       return 0;
+}
+
+static u8 *rtw_fwcd_next(struct rtw_dev *rtwdev, u32 item, u32 size)
+{
+       struct rtw_fwcd_desc *desc = &rtwdev->fw.fwcd_desc;
+       struct rtw_fwcd_hdr *hdr;
+       u8 *next;
+
+       if (!desc->data) {
+               rtw_dbg(rtwdev, RTW_DBG_FW, "fwcd isn't prepared successfully\n");
+               return NULL;
+       }
+
+       next = desc->next + sizeof(struct rtw_fwcd_hdr);
+       if (next - desc->data + size > desc->size) {
+               rtw_dbg(rtwdev, RTW_DBG_FW, "fwcd isn't prepared enough\n");
+               return NULL;
+       }
+
+       hdr = (struct rtw_fwcd_hdr *)(desc->next);
+       hdr->item = item;
+       hdr->size = size;
+       hdr->padding1 = 0x01234567;
+       hdr->padding2 = 0x89abcdef;
+       desc->next = next + size;
+
+       return next;
+}
+
+static void rtw_fwcd_dump(struct rtw_dev *rtwdev)
+{
+       struct rtw_fwcd_desc *desc = &rtwdev->fw.fwcd_desc;
+
+       rtw_dbg(rtwdev, RTW_DBG_FW, "dump fwcd\n");
+
+       /* The data is freed once the device coredump's lifetime expires.
+        * After dev_coredumpv() is called, the buffer is owned by the
+        * device coredump framework. Note that a new dump will be
+        * discarded if a previous one has not been released yet.
+        */
+       dev_coredumpv(rtwdev->dev, desc->data, desc->size, GFP_KERNEL);
+}
+
+static void rtw_fwcd_free(struct rtw_dev *rtwdev, bool free_self)
+{
+       struct rtw_fwcd_desc *desc = &rtwdev->fw.fwcd_desc;
+
+       if (free_self) {
+               rtw_dbg(rtwdev, RTW_DBG_FW, "free fwcd by self\n");
+               vfree(desc->data);
+       }
+
+       desc->data = NULL;
+       desc->next = NULL;
+}
+
+static int rtw_fw_dump_crash_log(struct rtw_dev *rtwdev)
 {
        u32 size = rtwdev->chip->fw_rxff_size;
        u32 *buf;
        u8 seq;
-       bool ret = true;
 
-       buf = vmalloc(size);
+       buf = (u32 *)rtw_fwcd_next(rtwdev, RTW_FWCD_TLV, size);
        if (!buf)
-               goto exit;
+               return -ENOMEM;
 
        if (rtw_fw_dump_fifo(rtwdev, RTW_FW_FIFO_SEL_RXBUF_FW, 0, size, buf)) {
                rtw_dbg(rtwdev, RTW_DBG_FW, "dump fw fifo fail\n");
-               goto free_buf;
+               return -EINVAL;
        }
 
        if (GET_FW_DUMP_LEN(buf) == 0) {
                rtw_dbg(rtwdev, RTW_DBG_FW, "fw crash dump's length is 0\n");
-               goto free_buf;
+               return -EINVAL;
        }
 
        seq = GET_FW_DUMP_SEQ(buf);
-       if (seq > 0 && seq != (rtwdev->fw.prev_dump_seq + 1)) {
+       if (seq > 0) {
                rtw_dbg(rtwdev, RTW_DBG_FW,
                        "fw crash dump's seq is wrong: %d\n", seq);
-               goto free_buf;
-       }
-
-       print_hex_dump(KERN_ERR, "rtw88 fw dump: ", DUMP_PREFIX_OFFSET, 16, 1,
-                      buf, size, true);
-
-       if (GET_FW_DUMP_MORE(buf) == 1) {
-               rtwdev->fw.prev_dump_seq = seq;
-               ret = false;
+               return -EINVAL;
        }
 
-free_buf:
-       vfree(buf);
-exit:
-       rtw_write8(rtwdev, REG_MCU_TST_CFG, 0);
-
-       return ret;
+       return 0;
 }
 
 int rtw_dump_fw(struct rtw_dev *rtwdev, const u32 ocp_src, u32 size,
-               const char *prefix_str)
+               u32 fwcd_item)
 {
        u32 rxff = rtwdev->chip->fw_rxff_size;
        u32 dump_size, done_size = 0;
        u8 *buf;
        int ret;
 
-       buf = vzalloc(size);
+       buf = rtw_fwcd_next(rtwdev, fwcd_item, size);
        if (!buf)
                return -ENOMEM;
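
After this conversion, every dump lands in the single coredump buffer as [ rtw_fwcd_hdr | payload ] records laid end to end, with the 0x01234567/0x89abcdef paddings serving as eye-catchers when inspecting the blob. A hedged sketch of walking such a blob offline, assuming the 16-byte header layout shown earlier:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct fwcd_hdr {           /* mirrors rtw_fwcd_hdr, assumed packed */
            uint32_t item;
            uint32_t size;
            uint32_t padding1;  /* 0x01234567 */
            uint32_t padding2;  /* 0x89abcdef */
    };

    /* Walk [hdr | payload] records until the buffer is exhausted. */
    static void walk_fwcd(const uint8_t *buf, size_t len)
    {
            size_t off = 0;

            while (off + sizeof(struct fwcd_hdr) <= len) {
                    const struct fwcd_hdr *hdr =
                            (const void *)(buf + off);

                    if (off + sizeof(*hdr) + hdr->size > len)
                            break;
                    printf("item %u, %u bytes\n", hdr->item, hdr->size);
                    off += sizeof(*hdr) + hdr->size;
            }
    }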
 
@@ -383,7 +459,7 @@ int rtw_dump_fw(struct rtw_dev *rtwdev, const u32 ocp_src, u32 size,
                        rtw_err(rtwdev,
                                "ddma fw 0x%x [+0x%x] to fw fifo fail\n",
                                ocp_src, done_size);
-                       goto exit;
+                       return ret;
                }
 
                ret = rtw_fw_dump_fifo(rtwdev, RTW_FW_FIFO_SEL_RXBUF_FW, 0,
@@ -392,24 +468,18 @@ int rtw_dump_fw(struct rtw_dev *rtwdev, const u32 ocp_src, u32 size,
                        rtw_err(rtwdev,
                                "dump fw 0x%x [+0x%x] from fw fifo fail\n",
                                ocp_src, done_size);
-                       goto exit;
+                       return ret;
                }
 
                size -= dump_size;
                done_size += dump_size;
        }
 
-       print_hex_dump(KERN_ERR, prefix_str, DUMP_PREFIX_OFFSET, 16, 1,
-                      buf, done_size, true);
-
-exit:
-       vfree(buf);
-       return ret;
+       return 0;
 }
 EXPORT_SYMBOL(rtw_dump_fw);
 
-int rtw_dump_reg(struct rtw_dev *rtwdev, const u32 addr, const u32 size,
-                const char *prefix_str)
+int rtw_dump_reg(struct rtw_dev *rtwdev, const u32 addr, const u32 size)
 {
        u8 *buf;
        u32 i;
@@ -419,17 +489,13 @@ int rtw_dump_reg(struct rtw_dev *rtwdev, const u32 addr, const u32 size,
                return -EINVAL;
        }
 
-       buf = vzalloc(size);
+       buf = rtw_fwcd_next(rtwdev, RTW_FWCD_REG, size);
        if (!buf)
                return -ENOMEM;
 
        for (i = 0; i < size; i += 4)
                *(u32 *)(buf + i) = rtw_read32(rtwdev, addr + i);
 
-       print_hex_dump(KERN_ERR, prefix_str, DUMP_PREFIX_OFFSET, 16, 4, buf,
-                      size, true);
-
-       vfree(buf);
        return 0;
 }
 EXPORT_SYMBOL(rtw_dump_reg);
@@ -487,20 +553,24 @@ void rtw_fw_recovery(struct rtw_dev *rtwdev)
 
 static void __fw_recovery_work(struct rtw_dev *rtwdev)
 {
-
-       /* rtw_fw_dump_crash_log() returns false indicates that there are
-        * still more log to dump. Driver set 0x1cf[7:0] = 0x1 to tell firmware
-        * to dump the remaining part of the log, and firmware will trigger an
-        * IMR_C2HCMD interrupt to inform driver the log is ready.
-        */
-       if (!rtw_fw_dump_crash_log(rtwdev)) {
-               rtw_write8(rtwdev, REG_HRCV_MSG, 1);
-               return;
-       }
-       rtwdev->fw.prev_dump_seq = 0;
+       int ret = 0;
 
        set_bit(RTW_FLAG_RESTARTING, rtwdev->flags);
-       rtw_chip_dump_fw_crash(rtwdev);
+
+       ret = rtw_fwcd_prep(rtwdev);
+       if (ret)
+               goto free;
+       ret = rtw_fw_dump_crash_log(rtwdev);
+       if (ret)
+               goto free;
+       ret = rtw_chip_dump_fw_crash(rtwdev);
+       if (ret)
+               goto free;
+
+       rtw_fwcd_dump(rtwdev);
+free:
+       rtw_fwcd_free(rtwdev, !!ret);
+       rtw_write8(rtwdev, REG_MCU_TST_CFG, 0);
 
        WARN(1, "firmware crash, starting reset and recovery\n");
 
@@ -1109,11 +1179,11 @@ static enum rtw_lps_deep_mode rtw_update_lps_deep_mode(struct rtw_dev *rtwdev,
                return LPS_DEEP_MODE_NONE;
 
        if ((chip->lps_deep_mode_supported & BIT(LPS_DEEP_MODE_PG)) &&
-           (fw->feature & FW_FEATURE_PG))
+           rtw_fw_feature_check(fw, FW_FEATURE_PG))
                return LPS_DEEP_MODE_PG;
 
        if ((chip->lps_deep_mode_supported & BIT(LPS_DEEP_MODE_LCLK)) &&
-           (fw->feature & FW_FEATURE_LCLK))
+           rtw_fw_feature_check(fw, FW_FEATURE_LCLK))
                return LPS_DEEP_MODE_LCLK;
 
        return LPS_DEEP_MODE_NONE;
@@ -1183,6 +1253,22 @@ err:
        return ret;
 }
 
+void rtw_core_fw_scan_notify(struct rtw_dev *rtwdev, bool start)
+{
+       if (!rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_NOTIFY_SCAN))
+               return;
+
+       if (start) {
+               rtw_fw_scan_notify(rtwdev, true);
+       } else {
+               reinit_completion(&rtwdev->fw_scan_density);
+               rtw_fw_scan_notify(rtwdev, false);
+               if (!wait_for_completion_timeout(&rtwdev->fw_scan_density,
+                                                SCAN_NOTIFY_TIMEOUT))
+                       rtw_warn(rtwdev, "firmware failed to report density after scan\n");
+       }
+}
+
 int rtw_core_start(struct rtw_dev *rtwdev)
 {
        int ret;
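
rtw_core_fw_scan_notify() re-arms the completion with reinit_completion() before sending the stop notification, so an early C2H_SCAN_RESULT cannot be missed between the send and the wait, and the wait is bounded by SCAN_NOTIFY_TIMEOUT. The ordering matters; a minimal sketch with a hypothetical trigger function:

    #include <linux/completion.h>
    #include <linux/jiffies.h>
    #include <linux/printk.h>

    static DECLARE_COMPLETION(evt_done);

    /* Hypothetical; its response handler is expected to call
     * complete(&evt_done) from IRQ context.
     */
    void trigger_event(void);

    static void request_and_wait(void)
    {
            /* Re-arm before triggering: waiting on a completion left
             * signalled by an earlier event would return immediately.
             */
            reinit_completion(&evt_done);
            trigger_event();
            if (!wait_for_completion_timeout(&evt_done,
                                             msecs_to_jiffies(10)))
                    pr_warn("no response within 10 ms\n");
    }
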
@@ -1761,6 +1847,7 @@ int rtw_core_init(struct rtw_dev *rtwdev)
 
        init_waitqueue_head(&rtwdev->coex.wait);
        init_completion(&rtwdev->lps_leave_check);
+       init_completion(&rtwdev->fw_scan_density);
 
        rtwdev->sec.total_cam_num = 32;
        rtwdev->hal.current_channel = 1;
@@ -1812,6 +1899,7 @@ void rtw_core_deinit(struct rtw_dev *rtwdev)
        destroy_workqueue(rtwdev->tx_wq);
        spin_lock_irqsave(&rtwdev->tx_report.q_lock, flags);
        skb_queue_purge(&rtwdev->tx_report.queue);
+       skb_queue_purge(&rtwdev->coex.queue);
        spin_unlock_irqrestore(&rtwdev->tx_report.q_lock, flags);
 
        list_for_each_entry_safe(rsvd_pkt, tmp, &rtwdev->rsvd_page_list,
index dc37448..e5af375 100644 (file)
@@ -806,7 +806,7 @@ struct rtw_regulatory {
 
 struct rtw_chip_ops {
        int (*mac_init)(struct rtw_dev *rtwdev);
-       void (*dump_fw_crash)(struct rtw_dev *rtwdev);
+       int (*dump_fw_crash)(struct rtw_dev *rtwdev);
        void (*shutdown)(struct rtw_dev *rtwdev);
        int (*read_efuse)(struct rtw_dev *rtwdev, u8 *map);
        void (*phy_set_param)(struct rtw_dev *rtwdev);
@@ -841,6 +841,10 @@ struct rtw_chip_ops {
                             u8 fixrate_en, u8 *new_rate);
        void (*cfo_init)(struct rtw_dev *rtwdev);
        void (*cfo_track)(struct rtw_dev *rtwdev);
+       void (*config_tx_path)(struct rtw_dev *rtwdev, u8 tx_path,
+                              enum rtw_bb_path tx_path_1ss,
+                              enum rtw_bb_path tx_path_cck,
+                              bool is_tx2_path);
 
        /* for coex */
        void (*coex_set_init)(struct rtw_dev *rtwdev);
@@ -1108,6 +1112,15 @@ enum rtw_fw_fifo_sel {
        RTW_FW_FIFO_MAX,
 };
 
+enum rtw_fwcd_item {
+       RTW_FWCD_TLV,
+       RTW_FWCD_REG,
+       RTW_FWCD_ROM,
+       RTW_FWCD_IMEM,
+       RTW_FWCD_DMEM,
+       RTW_FWCD_EMEM,
+};
+
 /* hardware configuration for each IC */
 struct rtw_chip_info {
        struct rtw_chip_ops *ops;
@@ -1136,7 +1149,11 @@ struct rtw_chip_info {
        u8 max_power_index;
 
        u16 fw_fifo_addr[RTW_FW_FIFO_MAX];
+       const struct rtw_fwcd_segs *fwcd_segs;
+
+       u8 default_1ss_tx_path;
 
+       bool path_div_supported;
        bool ht_supported;
        bool vht_supported;
        u8 lps_deep_mode_supported;
@@ -1614,6 +1631,8 @@ struct rtw_dm_info {
        struct rtw_iqk_info iqk;
        struct rtw_gapk_info gapk;
        bool is_bt_iqk_timeout;
+
+       u8 scan_density;
 };
 
 struct rtw_efuse {
@@ -1717,6 +1736,17 @@ struct rtw_fifo_conf {
        const struct rtw_rqpn *rqpn;
 };
 
+struct rtw_fwcd_desc {
+       u32 size;
+       u8 *next;
+       u8 *data;
+};
+
+struct rtw_fwcd_segs {
+       const u32 *segs;
+       u8 num;
+};
+
 #define FW_CD_TYPE 0xffff
 #define FW_CD_LEN 4
 #define FW_CD_VAL 0xaabbccdd
@@ -1724,11 +1754,11 @@ struct rtw_fw_state {
        const struct firmware *firmware;
        struct rtw_dev *rtwdev;
        struct completion completion;
+       struct rtw_fwcd_desc fwcd_desc;
        u16 version;
        u8 sub_version;
        u8 sub_index;
        u16 h2c_version;
-       u8 prev_dump_seq;
        u32 feature;
 };
 
@@ -1781,6 +1811,14 @@ struct rtw_hal {
                     [DESC_RATE_MAX];
 };
 
+struct rtw_path_div {
+       enum rtw_bb_path current_tx_path;
+       u32 path_a_sum;
+       u32 path_b_sum;
+       u16 path_a_cnt;
+       u16 path_b_cnt;
+};
+
 struct rtw_dev {
        struct ieee80211_hw *hw;
        struct device *dev;
@@ -1837,6 +1875,7 @@ struct rtw_dev {
        /* lps power state & handler work */
        struct rtw_lps_conf lps_conf;
        bool ps_enabled;
+       bool beacon_loss;
        struct completion lps_leave_check;
 
        struct dentry *debugfs;
@@ -1848,11 +1887,13 @@ struct rtw_dev {
        DECLARE_BITMAP(flags, NUM_OF_RTW_FLAGS);
 
        u8 mp_mode;
+       struct rtw_path_div dm_path_div;
 
        struct rtw_fw_state wow_fw;
        struct rtw_wow_param wow;
 
        bool need_rfk;
+       struct completion fw_scan_density;
 
        /* hci related data, must be last */
        u8 priv[] __aligned(sizeof(void *));
@@ -1923,10 +1964,12 @@ static inline void rtw_release_macid(struct rtw_dev *rtwdev, u8 mac_id)
        clear_bit(mac_id, rtwdev->mac_id_map);
 }
 
-static inline void rtw_chip_dump_fw_crash(struct rtw_dev *rtwdev)
+static inline int rtw_chip_dump_fw_crash(struct rtw_dev *rtwdev)
 {
        if (rtwdev->chip->ops->dump_fw_crash)
-               rtwdev->chip->ops->dump_fw_crash(rtwdev);
+               return rtwdev->chip->ops->dump_fw_crash(rtwdev);
+
+       return 0;
 }
 
 void rtw_get_channel_params(struct cfg80211_chan_def *chandef,
@@ -1958,9 +2001,9 @@ int rtw_sta_add(struct rtw_dev *rtwdev, struct ieee80211_sta *sta,
 void rtw_sta_remove(struct rtw_dev *rtwdev, struct ieee80211_sta *sta,
                    bool fw_exist);
 void rtw_fw_recovery(struct rtw_dev *rtwdev);
+void rtw_core_fw_scan_notify(struct rtw_dev *rtwdev, bool start);
 int rtw_dump_fw(struct rtw_dev *rtwdev, const u32 ocp_src, u32 size,
-               const char *prefix_str);
-int rtw_dump_reg(struct rtw_dev *rtwdev, const u32 addr, const u32 size,
-                const char *prefix_str);
+               u32 fwcd_item);
+int rtw_dump_reg(struct rtw_dev *rtwdev, const u32 addr, const u32 size);
 
 #endif
index f59a4c4..e7d17ab 100644 (file)
@@ -2,6 +2,7 @@
 /* Copyright(c) 2018-2019  Realtek Corporation
  */
 
+#include <linux/dmi.h>
 #include <linux/module.h>
 #include <linux/pci.h>
 #include "main.h"
@@ -1673,6 +1674,36 @@ static void rtw_pci_napi_deinit(struct rtw_dev *rtwdev)
        netif_napi_del(&rtwpci->napi);
 }
 
+enum rtw88_quirk_dis_pci_caps {
+       QUIRK_DIS_PCI_CAP_MSI,
+       QUIRK_DIS_PCI_CAP_ASPM,
+};
+
+static int disable_pci_caps(const struct dmi_system_id *dmi)
+{
+       uintptr_t dis_caps = (uintptr_t)dmi->driver_data;
+
+       if (dis_caps & BIT(QUIRK_DIS_PCI_CAP_MSI))
+               rtw_disable_msi = true;
+       if (dis_caps & BIT(QUIRK_DIS_PCI_CAP_ASPM))
+               rtw_pci_disable_aspm = true;
+
+       return 1;
+}
+
+static const struct dmi_system_id rtw88_pci_quirks[] = {
+       {
+               .callback = disable_pci_caps,
+               .ident = "Protempo Ltd L116HTN6SPW",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Protempo Ltd"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "L116HTN6SPW"),
+               },
+               .driver_data = (void *)BIT(QUIRK_DIS_PCI_CAP_ASPM),
+       },
+       {}
+};
+
 int rtw_pci_probe(struct pci_dev *pdev,
                  const struct pci_device_id *id)
 {
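
dmi_check_system() runs the callback for every table entry whose DMI_MATCH fields all match, so handling another quirky machine is just one more array element. A hypothetical entry (vendor and product strings invented for illustration) that would disable MSI instead of ASPM might read:

    {
            .callback = disable_pci_caps,
            .ident = "Example Vendor Model-X",      /* hypothetical */
            .matches = {
                    DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
                    DMI_MATCH(DMI_PRODUCT_NAME, "Model-X"),
            },
            .driver_data = (void *)BIT(QUIRK_DIS_PCI_CAP_MSI),
    },
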
@@ -1723,6 +1754,7 @@ int rtw_pci_probe(struct pci_dev *pdev,
                goto err_destroy_pci;
        }
 
+       dmi_check_system(rtw88_pci_quirks);
        rtw_pci_phy_cfg(rtwdev);
 
        ret = rtw_register_hw(rtwdev, hw);
index 8146aca..569dd3c 100644 (file)
@@ -127,6 +127,17 @@ static void rtw_phy_cfo_init(struct rtw_dev *rtwdev)
                chip->ops->cfo_init(rtwdev);
 }
 
+static void rtw_phy_tx_path_div_init(struct rtw_dev *rtwdev)
+{
+       struct rtw_path_div *path_div = &rtwdev->dm_path_div;
+
+       path_div->current_tx_path = rtwdev->chip->default_1ss_tx_path;
+       path_div->path_a_cnt = 0;
+       path_div->path_a_sum = 0;
+       path_div->path_b_cnt = 0;
+       path_div->path_b_sum = 0;
+}
+
 void rtw_phy_init(struct rtw_dev *rtwdev)
 {
        struct rtw_chip_info *chip = rtwdev->chip;
@@ -149,6 +160,7 @@ void rtw_phy_init(struct rtw_dev *rtwdev)
 
        dm_info->iqk.done = false;
        rtw_phy_cfo_init(rtwdev);
+       rtw_phy_tx_path_div_init(rtwdev);
 }
 EXPORT_SYMBOL(rtw_phy_init);
 
@@ -695,6 +707,7 @@ void rtw_phy_dynamic_mechanism(struct rtw_dev *rtwdev)
        rtw_phy_dig(rtwdev);
        rtw_phy_cck_pd(rtwdev);
        rtw_phy_ra_track(rtwdev);
+       rtw_phy_tx_path_diversity(rtwdev);
        rtw_phy_cfo_track(rtwdev);
        rtw_phy_dpk_track(rtwdev);
        rtw_phy_pwr_track(rtwdev);
@@ -2315,3 +2328,71 @@ bool rtw_phy_pwrtrack_need_iqk(struct rtw_dev *rtwdev)
        return false;
 }
 EXPORT_SYMBOL(rtw_phy_pwrtrack_need_iqk);
+
+static void rtw_phy_set_tx_path_by_reg(struct rtw_dev *rtwdev,
+                                      enum rtw_bb_path tx_path_sel_1ss)
+{
+       struct rtw_path_div *path_div = &rtwdev->dm_path_div;
+       enum rtw_bb_path tx_path_sel_cck = tx_path_sel_1ss;
+       struct rtw_chip_info *chip = rtwdev->chip;
+
+       if (tx_path_sel_1ss == path_div->current_tx_path)
+               return;
+
+       path_div->current_tx_path = tx_path_sel_1ss;
+       rtw_dbg(rtwdev, RTW_DBG_PATH_DIV, "Switch TX path=%s\n",
+               tx_path_sel_1ss == BB_PATH_A ? "A" : "B");
+       chip->ops->config_tx_path(rtwdev, rtwdev->hal.antenna_tx,
+                                 tx_path_sel_1ss, tx_path_sel_cck, false);
+}
+
+static void rtw_phy_tx_path_div_select(struct rtw_dev *rtwdev)
+{
+       struct rtw_path_div *path_div = &rtwdev->dm_path_div;
+       enum rtw_bb_path path = path_div->current_tx_path;
+       s32 rssi_a = 0, rssi_b = 0;
+
+       if (path_div->path_a_cnt)
+               rssi_a = path_div->path_a_sum / path_div->path_a_cnt;
+       else
+               rssi_a = 0;
+       if (path_div->path_b_cnt)
+               rssi_b = path_div->path_b_sum / path_div->path_b_cnt;
+       else
+               rssi_b = 0;
+
+       if (rssi_a != rssi_b)
+               path = (rssi_a > rssi_b) ? BB_PATH_A : BB_PATH_B;
+
+       path_div->path_a_cnt = 0;
+       path_div->path_a_sum = 0;
+       path_div->path_b_cnt = 0;
+       path_div->path_b_sum = 0;
+       rtw_phy_set_tx_path_by_reg(rtwdev, path);
+}
+
+static void rtw_phy_tx_path_diversity_2ss(struct rtw_dev *rtwdev)
+{
+       if (rtwdev->hal.antenna_rx != BB_PATH_AB) {
+               rtw_dbg(rtwdev, RTW_DBG_PATH_DIV,
+                       "[Return] tx_Path_en=%d, rx_Path_en=%d\n",
+                       rtwdev->hal.antenna_tx, rtwdev->hal.antenna_rx);
+               return;
+       }
+       if (rtwdev->sta_cnt == 0) {
+               rtw_dbg(rtwdev, RTW_DBG_PATH_DIV, "No Link\n");
+               return;
+       }
+
+       rtw_phy_tx_path_div_select(rtwdev);
+}
+
+void rtw_phy_tx_path_diversity(struct rtw_dev *rtwdev)
+{
+       struct rtw_chip_info *chip = rtwdev->chip;
+
+       if (!chip->path_div_supported)
+               return;
+
+       rtw_phy_tx_path_diversity_2ss(rtwdev);
+}
index 0b6f2fc..112ed12 100644
@@ -61,6 +61,7 @@ void rtw_phy_config_swing_table(struct rtw_dev *rtwdev,
                                struct rtw_swing_table *swing_table);
 void rtw_phy_parsing_cfo(struct rtw_dev *rtwdev,
                         struct rtw_rx_pkt_stat *pkt_stat);
+void rtw_phy_tx_path_diversity(struct rtw_dev *rtwdev);
 
 struct rtw_txpwr_lmt_cfg_pair {
        u8 regd;
index 3bead34..3f0ac33 100644
@@ -152,7 +152,7 @@ static void rtw_fw_leave_lps_check(struct rtw_dev *rtwdev)
        else
                fw = &rtwdev->fw;
 
-       if (fw->feature & FW_FEATURE_LPS_C2H)
+       if (rtw_fw_feature_check(fw, FW_FEATURE_LPS_C2H))
                ret = __rtw_fw_leave_lps_check_c2h(rtwdev);
        else
                ret = __rtw_fw_leave_lps_check_reg(rtwdev);
@@ -172,7 +172,7 @@ static void rtw_fw_leave_lps_check_prepare(struct rtw_dev *rtwdev)
        else
                fw = &rtwdev->fw;
 
-       if (fw->feature & FW_FEATURE_LPS_C2H)
+       if (rtw_fw_feature_check(fw, FW_FEATURE_LPS_C2H))
                reinit_completion(&rtwdev->lps_leave_check);
 }
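Both hunks replace an open-coded bitmask test with rtw_fw_feature_check(), introduced elsewhere in this series. Presumably it is a thin inline wrapper over the same test, along the lines of this sketch (assuming the helper simply masks fw->feature):

        static inline bool rtw_fw_feature_check(struct rtw_fw_state *fw,
                                                enum rtw_fw_feature feature)
        {
                return !!(fw->feature & feature);
        }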
 
index 6cb593c..8bf3cd3 100644
@@ -17,7 +17,6 @@
 #include "util.h"
 #include "bf.h"
 #include "efuse.h"
-#include "coex.h"
 
 #define IQK_DONE_8822C 0xaa
 
@@ -80,6 +79,13 @@ static void rtw8822c_header_file_init(struct rtw_dev *rtwdev, bool pre)
                rtw_write32_set(rtwdev, REG_ENCCK, BIT_CCK_OFDM_BLK_EN);
 }
 
+static void rtw8822c_bb_reset(struct rtw_dev *rtwdev)
+{
+       rtw_write16_set(rtwdev, REG_SYS_FUNC_EN, BIT_FEN_BB_RSTB);
+       rtw_write16_clr(rtwdev, REG_SYS_FUNC_EN, BIT_FEN_BB_RSTB);
+       rtw_write16_set(rtwdev, REG_SYS_FUNC_EN, BIT_FEN_BB_RSTB);
+}
+
 static void rtw8822c_dac_backup_reg(struct rtw_dev *rtwdev,
                                    struct rtw_backup_info *backup,
                                    struct rtw_backup_info *backup_rf)
@@ -2103,13 +2109,51 @@ static int rtw8822c_mac_init(struct rtw_dev *rtwdev)
        return 0;
 }
 
-static void rtw8822c_dump_fw_crash(struct rtw_dev *rtwdev)
+#define FWCD_SIZE_REG_8822C 0x2000
+#define FWCD_SIZE_DMEM_8822C 0x10000
+#define FWCD_SIZE_IMEM_8822C 0x10000
+#define FWCD_SIZE_EMEM_8822C 0x20000
+#define FWCD_SIZE_ROM_8822C 0x10000
+
+static const u32 __fwcd_segs_8822c[] = {
+       FWCD_SIZE_REG_8822C,
+       FWCD_SIZE_DMEM_8822C,
+       FWCD_SIZE_IMEM_8822C,
+       FWCD_SIZE_EMEM_8822C,
+       FWCD_SIZE_ROM_8822C,
+};
+
+static const struct rtw_fwcd_segs rtw8822c_fwcd_segs = {
+       .segs = __fwcd_segs_8822c,
+       .num = ARRAY_SIZE(__fwcd_segs_8822c),
+};
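For reference, the five segments above sum to 0x2000 + 0x10000 + 0x10000 + 0x20000 + 0x10000 = 0x52000 bytes (328 KiB), which is presumably the total crash-dump buffer the rtw88 core sizes from fwcd_segs (the allocation side is not part of this hunk). A quick check of that arithmetic:

        /* Sanity-check the total crash-dump size implied by the segment
         * table above (sketch; buffer sizing happens in rtw88 core code
         * not shown in this diff). */
        static u32 fwcd_total_size(const struct rtw_fwcd_segs *segs)
        {
                u32 i, sum = 0;

                for (i = 0; i < segs->num; i++)
                        sum += segs->segs[i];
                return sum;     /* 0x52000 for rtw8822c_fwcd_segs */
        }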
+
+static int rtw8822c_dump_fw_crash(struct rtw_dev *rtwdev)
 {
-       rtw_dump_reg(rtwdev, 0x0, 0x2000, "rtw8822c reg_");
-       rtw_dump_fw(rtwdev, OCPBASE_DMEM_88XX, 0x10000, "rtw8822c DMEM_");
-       rtw_dump_fw(rtwdev, OCPBASE_IMEM_88XX, 0x10000, "rtw8822c IMEM_");
-       rtw_dump_fw(rtwdev, OCPBASE_EMEM_88XX, 0x20000, "rtw8822c EMEM_");
-       rtw_dump_fw(rtwdev, OCPBASE_ROM_88XX, 0x10000, "rtw8822c ROM_");
+#define __dump_fw_8822c(_dev, _mem) \
+       rtw_dump_fw(_dev, OCPBASE_ ## _mem ## _88XX, \
+                   FWCD_SIZE_ ## _mem ## _8822C, RTW_FWCD_ ## _mem)
+       int ret;
+
+       ret = rtw_dump_reg(rtwdev, 0x0, FWCD_SIZE_REG_8822C);
+       if (ret)
+               return ret;
+       ret = __dump_fw_8822c(rtwdev, DMEM);
+       if (ret)
+               return ret;
+       ret = __dump_fw_8822c(rtwdev, IMEM);
+       if (ret)
+               return ret;
+       ret = __dump_fw_8822c(rtwdev, EMEM);
+       if (ret)
+               return ret;
+       ret = __dump_fw_8822c(rtwdev, ROM);
+       if (ret)
+               return ret;
+
+       return 0;
+
+#undef __dump_fw_8822c
 }
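The local __dump_fw_8822c() macro uses token pasting to derive both the OCP base constant and the size constant from a single memory-region name, so for example __dump_fw_8822c(rtwdev, DMEM) expands to:

        rtw_dump_fw(rtwdev, OCPBASE_DMEM_88XX,
                    FWCD_SIZE_DMEM_8822C, RTW_FWCD_DMEM);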
 
 static void rtw8822c_rstb_3wire(struct rtw_dev *rtwdev, bool enable)
@@ -2424,10 +2468,11 @@ static void rtw8822c_config_cck_tx_path(struct rtw_dev *rtwdev, u8 tx_path,
                else
                        rtw_write32_mask(rtwdev, REG_RXCCKSEL, 0xf0000000, 0x8);
        }
+       rtw8822c_bb_reset(rtwdev);
 }
 
 static void rtw8822c_config_ofdm_tx_path(struct rtw_dev *rtwdev, u8 tx_path,
-                                        bool is_tx2_path)
+                                        enum rtw_bb_path tx_path_sel_1ss)
 {
        if (tx_path == BB_PATH_A) {
                rtw_write32_mask(rtwdev, REG_ANTMAP0, 0xff, 0x11);
@@ -2436,21 +2481,28 @@ static void rtw8822c_config_ofdm_tx_path(struct rtw_dev *rtwdev, u8 tx_path,
                rtw_write32_mask(rtwdev, REG_ANTMAP0, 0xff, 0x12);
                rtw_write32_mask(rtwdev, REG_TXLGMAP, 0xff, 0x0);
        } else {
-               if (is_tx2_path) {
+               if (tx_path_sel_1ss == BB_PATH_AB) {
                        rtw_write32_mask(rtwdev, REG_ANTMAP0, 0xff, 0x33);
                        rtw_write32_mask(rtwdev, REG_TXLGMAP, 0xffff, 0x0404);
-               } else {
+               } else if (tx_path_sel_1ss == BB_PATH_B) {
+                       rtw_write32_mask(rtwdev, REG_ANTMAP0, 0xff, 0x32);
+                       rtw_write32_mask(rtwdev, REG_TXLGMAP, 0xffff, 0x0400);
+               } else if (tx_path_sel_1ss == BB_PATH_A) {
                        rtw_write32_mask(rtwdev, REG_ANTMAP0, 0xff, 0x31);
                        rtw_write32_mask(rtwdev, REG_TXLGMAP, 0xffff, 0x0400);
                }
        }
+       rtw8822c_bb_reset(rtwdev);
 }
 
 static void rtw8822c_config_tx_path(struct rtw_dev *rtwdev, u8 tx_path,
+                                   enum rtw_bb_path tx_path_sel_1ss,
+                                   enum rtw_bb_path tx_path_cck,
                                    bool is_tx2_path)
 {
-       rtw8822c_config_cck_tx_path(rtwdev, tx_path, is_tx2_path);
-       rtw8822c_config_ofdm_tx_path(rtwdev, tx_path, is_tx2_path);
+       rtw8822c_config_cck_tx_path(rtwdev, tx_path_cck, is_tx2_path);
+       rtw8822c_config_ofdm_tx_path(rtwdev, tx_path, tx_path_sel_1ss);
+       rtw8822c_bb_reset(rtwdev);
 }
 
 static void rtw8822c_config_trx_mode(struct rtw_dev *rtwdev, u8 tx_path,
@@ -2466,7 +2518,8 @@ static void rtw8822c_config_trx_mode(struct rtw_dev *rtwdev, u8 tx_path,
                rtw_write32_mask(rtwdev, REG_ORITXCODE2, MASK20BITS, 0x11111);
 
        rtw8822c_config_rx_path(rtwdev, rx_path);
-       rtw8822c_config_tx_path(rtwdev, tx_path, is_tx2_path);
+       rtw8822c_config_tx_path(rtwdev, tx_path, BB_PATH_A, BB_PATH_A,
+                               is_tx2_path);
 
        rtw8822c_toggle_igi(rtwdev);
 }
@@ -2517,6 +2570,7 @@ static void query_phy_status_page0(struct rtw_dev *rtwdev, u8 *phy_status,
 static void query_phy_status_page1(struct rtw_dev *rtwdev, u8 *phy_status,
                                   struct rtw_rx_pkt_stat *pkt_stat)
 {
+       struct rtw_path_div *p_div = &rtwdev->dm_path_div;
        struct rtw_dm_info *dm_info = &rtwdev->dm_info;
        u8 rxsc, bw;
        s8 min_rx_power = -120;
@@ -2559,6 +2613,13 @@ static void query_phy_status_page1(struct rtw_dev *rtwdev, u8 *phy_status,
        for (path = 0; path <= rtwdev->hal.rf_path_num; path++) {
                rssi = rtw_phy_rf_power_2_rssi(&pkt_stat->rx_power[path], 1);
                dm_info->rssi[path] = rssi;
+               if (path == RF_PATH_A) {
+                       p_div->path_a_sum += rssi;
+                       p_div->path_a_cnt++;
+               } else if (path == RF_PATH_B) {
+                       p_div->path_b_sum += rssi;
+                       p_div->path_b_cnt++;
+               }
                dm_info->rx_snr[path] = pkt_stat->rx_snr[path] >> 1;
                dm_info->cfo_tail[path] = (pkt_stat->cfo_tail[path] * 5) >> 1;
 
@@ -4371,26 +4432,28 @@ static void rtw8822c_pwrtrack_set(struct rtw_dev *rtwdev, u8 rf_path)
        }
 }
 
-static void rtw8822c_pwr_track_path(struct rtw_dev *rtwdev,
-                                   struct rtw_swing_table *swing_table,
-                                   u8 path)
+static void rtw8822c_pwr_track_stats(struct rtw_dev *rtwdev, u8 path)
 {
-       struct rtw_dm_info *dm_info = &rtwdev->dm_info;
-       u8 thermal_value, delta;
+       u8 thermal_value;
 
        if (rtwdev->efuse.thermal_meter[path] == 0xff)
                return;
 
        thermal_value = rtw_read_rf(rtwdev, path, RF_T_METER, 0x7e);
-
        rtw_phy_pwrtrack_avg(rtwdev, thermal_value, path);
+}
 
-       delta = rtw_phy_pwrtrack_get_delta(rtwdev, path);
+static void rtw8822c_pwr_track_path(struct rtw_dev *rtwdev,
+                                   struct rtw_swing_table *swing_table,
+                                   u8 path)
+{
+       struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+       u8 delta;
 
+       delta = rtw_phy_pwrtrack_get_delta(rtwdev, path);
        dm_info->delta_power_index[path] =
                rtw_phy_pwrtrack_get_pwridx(rtwdev, swing_table, path, path,
                                            delta);
-
        rtw8822c_pwrtrack_set(rtwdev, path);
 }
 
@@ -4401,12 +4464,12 @@ static void __rtw8822c_pwr_track(struct rtw_dev *rtwdev)
 
        rtw_phy_config_swing_table(rtwdev, &swing_table);
 
+       for (i = 0; i < rtwdev->hal.rf_path_num; i++)
+               rtw8822c_pwr_track_stats(rtwdev, i);
        if (rtw_phy_pwrtrack_need_lck(rtwdev))
                rtw8822c_do_lck(rtwdev);
-
        for (i = 0; i < rtwdev->hal.rf_path_num; i++)
                rtw8822c_pwr_track_path(rtwdev, &swing_table, i);
-
 }
 
 static void rtw8822c_pwr_track(struct rtw_dev *rtwdev)
@@ -4851,6 +4914,7 @@ static struct rtw_chip_ops rtw8822c_ops = {
        .cfg_csi_rate           = rtw_bf_cfg_csi_rate,
        .cfo_init               = rtw8822c_cfo_init,
        .cfo_track              = rtw8822c_cfo_track,
+       .config_tx_path         = rtw8822c_config_tx_path,
 
        .coex_set_init          = rtw8822c_coex_cfg_init,
        .coex_set_ant_switch    = NULL,
@@ -5192,6 +5256,8 @@ struct rtw_chip_info rtw8822c_hw_spec = {
        .band = RTW_BAND_2G | RTW_BAND_5G,
        .page_size = 128,
        .dig_min = 0x20,
+       .default_1ss_tx_path = BB_PATH_A,
+       .path_div_supported = true,
        .ht_supported = true,
        .vht_supported = true,
        .lps_deep_mode_supported = BIT(LPS_DEEP_MODE_LCLK) | BIT(LPS_DEEP_MODE_PG),
@@ -5259,6 +5325,7 @@ struct rtw_chip_info rtw8822c_hw_spec = {
        .coex_info_hw_regs = coex_info_hw_regs_8822c,
 
        .fw_fifo_addr = {0x780, 0x700, 0x780, 0x660, 0x650, 0x680},
+       .fwcd_segs = &rtw8822c_fwcd_segs,
 };
 EXPORT_SYMBOL(rtw8822c_hw_spec);
 
index 822f3da..f9e3d07 100644
@@ -16812,53 +16812,53 @@ static const u32 rtw8822c_rf_a[] = {
        0x92000002,     0x00000000,     0x40000000,     0x00000000,
                0x03F, 0x00010E46,
        0x93000001,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00030246,
+               0x03F, 0x0003D646,
        0x93000002,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00030246,
+               0x03F, 0x0003D646,
        0x93000003,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00030246,
+               0x03F, 0x0003D646,
        0x93000004,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00030246,
+               0x03F, 0x0003D646,
        0x93000005,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00030246,
+               0x03F, 0x0003D646,
        0x93000006,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00030246,
+               0x03F, 0x0003D646,
        0x93000015,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00030246,
+               0x03F, 0x0003D646,
        0x93000016,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00030246,
+               0x03F, 0x0003D646,
        0x94000001,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00030246,
+               0x03F, 0x0003D646,
        0x94000002,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00030246,
+               0x03F, 0x0003D646,
        0x94000003,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00030246,
+               0x03F, 0x0003D646,
        0x94000004,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00030246,
+               0x03F, 0x0003D646,
        0x94000005,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00030246,
+               0x03F, 0x0003D646,
        0x94000006,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00030246,
+               0x03F, 0x0003D646,
        0x94000015,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00030246,
+               0x03F, 0x0003D646,
        0x94000016,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00030246,
+               0x03F, 0x0003D646,
        0x95000001,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00030246,
+               0x03F, 0x0003D646,
        0x95000002,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00030246,
+               0x03F, 0x0003D646,
        0x95000003,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00030246,
+               0x03F, 0x0003D646,
        0x95000004,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00030246,
+               0x03F, 0x0003D646,
        0x95000005,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00030246,
+               0x03F, 0x0003D646,
        0x95000006,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00030246,
+               0x03F, 0x0003D646,
        0x95000015,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00030246,
+               0x03F, 0x0003D646,
        0x95000016,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00030246,
+               0x03F, 0x0003D646,
        0xA0000000,     0x00000000,
                0x03F, 0x00002A46,
        0xB0000000,     0x00000000,
@@ -18762,53 +18762,53 @@ static const u32 rtw8822c_rf_a[] = {
        0x92000002,     0x00000000,     0x40000000,     0x00000000,
                0x03F, 0x0000EA46,
        0x93000001,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x93000002,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x93000003,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x93000004,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x93000005,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x93000006,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x93000015,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x93000016,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x94000001,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x94000002,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x94000003,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x94000004,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x94000005,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x94000006,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x94000015,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x94000016,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x95000001,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x95000002,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x95000003,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x95000004,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x95000005,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x95000006,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x95000015,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x95000016,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0xA0000000,     0x00000000,
                0x03F, 0x00002A46,
        0xB0000000,     0x00000000,
@@ -18957,53 +18957,53 @@ static const u32 rtw8822c_rf_a[] = {
        0x92000002,     0x00000000,     0x40000000,     0x00000000,
                0x03F, 0x0000EA46,
        0x93000001,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x93000002,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x93000003,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x93000004,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x93000005,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x93000006,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x93000015,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x93000016,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x94000001,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x94000002,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x94000003,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x94000004,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x94000005,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x94000006,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x94000015,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x94000016,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x95000001,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x95000002,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x95000003,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x95000004,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x95000005,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x95000006,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x95000015,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x95000016,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0xA0000000,     0x00000000,
                0x03F, 0x00002A46,
        0xB0000000,     0x00000000,
@@ -19152,53 +19152,53 @@ static const u32 rtw8822c_rf_a[] = {
        0x92000002,     0x00000000,     0x40000000,     0x00000000,
                0x03F, 0x0000EA46,
        0x93000001,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x93000002,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x93000003,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x93000004,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x93000005,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x93000006,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x93000015,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x93000016,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x94000001,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x94000002,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x94000003,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x94000004,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x94000005,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x94000006,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x94000015,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x94000016,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x95000001,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x95000002,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x95000003,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x95000004,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x95000005,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x95000006,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x95000015,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x95000016,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0xA0000000,     0x00000000,
                0x03F, 0x00002A46,
        0xB0000000,     0x00000000,
@@ -19347,53 +19347,53 @@ static const u32 rtw8822c_rf_a[] = {
        0x92000002,     0x00000000,     0x40000000,     0x00000000,
                0x03F, 0x0000EA46,
        0x93000001,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x93000002,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x93000003,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x93000004,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x93000005,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x93000006,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x93000015,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x93000016,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x94000001,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x94000002,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x94000003,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x94000004,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x94000005,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x94000006,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x94000015,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x94000016,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x95000001,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x95000002,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x95000003,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x95000004,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x95000005,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x95000006,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x95000015,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x95000016,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0xA0000000,     0x00000000,
                0x03F, 0x00002A46,
        0xB0000000,     0x00000000,
@@ -19610,21 +19610,21 @@ static const u32 rtw8822c_rf_a[] = {
                0x033, 0x00000062,
                0x03F, 0x00000908,
                0x033, 0x00000063,
-               0x03F, 0x00000D09,
+               0x03F, 0x000008C8,
                0x033, 0x00000064,
-               0x03F, 0x00000D49,
+               0x03F, 0x000008CB,
                0x033, 0x00000065,
-               0x03F, 0x00000D8A,
+               0x03F, 0x000008CE,
                0x033, 0x00000066,
-               0x03F, 0x00000DEB,
+               0x03F, 0x000008D1,
                0x033, 0x00000067,
-               0x03F, 0x00000DEE,
+               0x03F, 0x000008D4,
                0x033, 0x00000068,
-               0x03F, 0x00000DF1,
+               0x03F, 0x00000DD1,
                0x033, 0x00000069,
-               0x03F, 0x00000DF4,
+               0x03F, 0x00000DD4,
                0x033, 0x0000006A,
-               0x03F, 0x00000DF7,
+               0x03F, 0x00000DD7,
        0x93000002,     0x00000000,     0x40000000,     0x00000000,
                0x033, 0x00000060,
                0x03F, 0x00000467,
@@ -19633,21 +19633,21 @@ static const u32 rtw8822c_rf_a[] = {
                0x033, 0x00000062,
                0x03F, 0x00000908,
                0x033, 0x00000063,
-               0x03F, 0x00000D09,
+               0x03F, 0x000008C8,
                0x033, 0x00000064,
-               0x03F, 0x00000D49,
+               0x03F, 0x000008CB,
                0x033, 0x00000065,
-               0x03F, 0x00000D8A,
+               0x03F, 0x000008CE,
                0x033, 0x00000066,
-               0x03F, 0x00000DEB,
+               0x03F, 0x000008D1,
                0x033, 0x00000067,
-               0x03F, 0x00000DEE,
+               0x03F, 0x000008D4,
                0x033, 0x00000068,
-               0x03F, 0x00000DF1,
+               0x03F, 0x00000DD1,
                0x033, 0x00000069,
-               0x03F, 0x00000DF4,
+               0x03F, 0x00000DD4,
                0x033, 0x0000006A,
-               0x03F, 0x00000DF7,
+               0x03F, 0x00000DD7,
        0x93000003,     0x00000000,     0x40000000,     0x00000000,
                0x033, 0x00000060,
                0x03F, 0x00000467,
@@ -19656,21 +19656,21 @@ static const u32 rtw8822c_rf_a[] = {
                0x033, 0x00000062,
                0x03F, 0x00000908,
                0x033, 0x00000063,
-               0x03F, 0x00000D09,
+               0x03F, 0x000008C8,
                0x033, 0x00000064,
-               0x03F, 0x00000D49,
+               0x03F, 0x000008CB,
                0x033, 0x00000065,
-               0x03F, 0x00000D8A,
+               0x03F, 0x000008CE,
                0x033, 0x00000066,
-               0x03F, 0x00000DEB,
+               0x03F, 0x000008D1,
                0x033, 0x00000067,
-               0x03F, 0x00000DEE,
+               0x03F, 0x000008D4,
                0x033, 0x00000068,
-               0x03F, 0x00000DF1,
+               0x03F, 0x00000DD1,
                0x033, 0x00000069,
-               0x03F, 0x00000DF4,
+               0x03F, 0x00000DD4,
                0x033, 0x0000006A,
-               0x03F, 0x00000DF7,
+               0x03F, 0x00000DD7,
        0x93000004,     0x00000000,     0x40000000,     0x00000000,
                0x033, 0x00000060,
                0x03F, 0x00000467,
@@ -19679,21 +19679,21 @@ static const u32 rtw8822c_rf_a[] = {
                0x033, 0x00000062,
                0x03F, 0x00000908,
                0x033, 0x00000063,
-               0x03F, 0x00000D09,
+               0x03F, 0x000008C8,
                0x033, 0x00000064,
-               0x03F, 0x00000D49,
+               0x03F, 0x000008CB,
                0x033, 0x00000065,
-               0x03F, 0x00000D8A,
+               0x03F, 0x000008CE,
                0x033, 0x00000066,
-               0x03F, 0x00000DEB,
+               0x03F, 0x000008D1,
                0x033, 0x00000067,
-               0x03F, 0x00000DEE,
+               0x03F, 0x000008D4,
                0x033, 0x00000068,
-               0x03F, 0x00000DF1,
+               0x03F, 0x00000DD1,
                0x033, 0x00000069,
-               0x03F, 0x00000DF4,
+               0x03F, 0x00000DD4,
                0x033, 0x0000006A,
-               0x03F, 0x00000DF7,
+               0x03F, 0x00000DD7,
        0x93000005,     0x00000000,     0x40000000,     0x00000000,
                0x033, 0x00000060,
                0x03F, 0x00000467,
@@ -19702,21 +19702,21 @@ static const u32 rtw8822c_rf_a[] = {
                0x033, 0x00000062,
                0x03F, 0x00000908,
                0x033, 0x00000063,
-               0x03F, 0x00000D09,
+               0x03F, 0x000008C8,
                0x033, 0x00000064,
-               0x03F, 0x00000D49,
+               0x03F, 0x000008CB,
                0x033, 0x00000065,
-               0x03F, 0x00000D8A,
+               0x03F, 0x000008CE,
                0x033, 0x00000066,
-               0x03F, 0x00000DEB,
+               0x03F, 0x000008D1,
                0x033, 0x00000067,
-               0x03F, 0x00000DEE,
+               0x03F, 0x000008D4,
                0x033, 0x00000068,
-               0x03F, 0x00000DF1,
+               0x03F, 0x00000DD1,
                0x033, 0x00000069,
-               0x03F, 0x00000DF4,
+               0x03F, 0x00000DD4,
                0x033, 0x0000006A,
-               0x03F, 0x00000DF7,
+               0x03F, 0x00000DD7,
        0x93000006,     0x00000000,     0x40000000,     0x00000000,
                0x033, 0x00000060,
                0x03F, 0x00000467,
@@ -19725,21 +19725,21 @@ static const u32 rtw8822c_rf_a[] = {
                0x033, 0x00000062,
                0x03F, 0x00000908,
                0x033, 0x00000063,
-               0x03F, 0x00000D09,
+               0x03F, 0x000008C8,
                0x033, 0x00000064,
-               0x03F, 0x00000D49,
+               0x03F, 0x000008CB,
                0x033, 0x00000065,
-               0x03F, 0x00000D8A,
+               0x03F, 0x000008CE,
                0x033, 0x00000066,
-               0x03F, 0x00000DEB,
+               0x03F, 0x000008D1,
                0x033, 0x00000067,
-               0x03F, 0x00000DEE,
+               0x03F, 0x000008D4,
                0x033, 0x00000068,
-               0x03F, 0x00000DF1,
+               0x03F, 0x00000DD1,
                0x033, 0x00000069,
-               0x03F, 0x00000DF4,
+               0x03F, 0x00000DD4,
                0x033, 0x0000006A,
-               0x03F, 0x00000DF7,
+               0x03F, 0x00000DD7,
        0x93000015,     0x00000000,     0x40000000,     0x00000000,
                0x033, 0x00000060,
                0x03F, 0x00000467,
@@ -19748,21 +19748,21 @@ static const u32 rtw8822c_rf_a[] = {
                0x033, 0x00000062,
                0x03F, 0x00000908,
                0x033, 0x00000063,
-               0x03F, 0x00000D09,
+               0x03F, 0x000008C8,
                0x033, 0x00000064,
-               0x03F, 0x00000D49,
+               0x03F, 0x000008CB,
                0x033, 0x00000065,
-               0x03F, 0x00000D8A,
+               0x03F, 0x000008CE,
                0x033, 0x00000066,
-               0x03F, 0x00000DEB,
+               0x03F, 0x000008D1,
                0x033, 0x00000067,
-               0x03F, 0x00000DEE,
+               0x03F, 0x000008D4,
                0x033, 0x00000068,
-               0x03F, 0x00000DF1,
+               0x03F, 0x00000DD1,
                0x033, 0x00000069,
-               0x03F, 0x00000DF4,
+               0x03F, 0x00000DD4,
                0x033, 0x0000006A,
-               0x03F, 0x00000DF7,
+               0x03F, 0x00000DD7,
        0x93000016,     0x00000000,     0x40000000,     0x00000000,
                0x033, 0x00000060,
                0x03F, 0x00000467,
@@ -19771,21 +19771,21 @@ static const u32 rtw8822c_rf_a[] = {
                0x033, 0x00000062,
                0x03F, 0x00000908,
                0x033, 0x00000063,
-               0x03F, 0x00000D09,
+               0x03F, 0x000008C8,
                0x033, 0x00000064,
-               0x03F, 0x00000D49,
+               0x03F, 0x000008CB,
                0x033, 0x00000065,
-               0x03F, 0x00000D8A,
+               0x03F, 0x000008CE,
                0x033, 0x00000066,
-               0x03F, 0x00000DEB,
+               0x03F, 0x000008D1,
                0x033, 0x00000067,
-               0x03F, 0x00000DEE,
+               0x03F, 0x000008D4,
                0x033, 0x00000068,
-               0x03F, 0x00000DF1,
+               0x03F, 0x00000DD1,
                0x033, 0x00000069,
-               0x03F, 0x00000DF4,
+               0x03F, 0x00000DD4,
                0x033, 0x0000006A,
-               0x03F, 0x00000DF7,
+               0x03F, 0x00000DD7,
        0x94000001,     0x00000000,     0x40000000,     0x00000000,
                0x033, 0x00000060,
                0x03F, 0x00000467,
@@ -19794,21 +19794,21 @@ static const u32 rtw8822c_rf_a[] = {
                0x033, 0x00000062,
                0x03F, 0x00000908,
                0x033, 0x00000063,
-               0x03F, 0x00000D09,
+               0x03F, 0x000008C8,
                0x033, 0x00000064,
-               0x03F, 0x00000D49,
+               0x03F, 0x000008CB,
                0x033, 0x00000065,
-               0x03F, 0x00000D8A,
+               0x03F, 0x000008CE,
                0x033, 0x00000066,
-               0x03F, 0x00000DEB,
+               0x03F, 0x000008D1,
                0x033, 0x00000067,
-               0x03F, 0x00000DEE,
+               0x03F, 0x000008D4,
                0x033, 0x00000068,
-               0x03F, 0x00000DF1,
+               0x03F, 0x00000DD1,
                0x033, 0x00000069,
-               0x03F, 0x00000DF4,
+               0x03F, 0x00000DD4,
                0x033, 0x0000006A,
-               0x03F, 0x00000DF7,
+               0x03F, 0x00000DD7,
        0x94000002,     0x00000000,     0x40000000,     0x00000000,
                0x033, 0x00000060,
                0x03F, 0x00000467,
@@ -19817,21 +19817,21 @@ static const u32 rtw8822c_rf_a[] = {
                0x033, 0x00000062,
                0x03F, 0x00000908,
                0x033, 0x00000063,
-               0x03F, 0x00000D09,
+               0x03F, 0x000008C8,
                0x033, 0x00000064,
-               0x03F, 0x00000D49,
+               0x03F, 0x000008CB,
                0x033, 0x00000065,
-               0x03F, 0x00000D8A,
+               0x03F, 0x000008CE,
                0x033, 0x00000066,
-               0x03F, 0x00000DEB,
+               0x03F, 0x000008D1,
                0x033, 0x00000067,
-               0x03F, 0x00000DEE,
+               0x03F, 0x000008D4,
                0x033, 0x00000068,
-               0x03F, 0x00000DF1,
+               0x03F, 0x00000DD1,
                0x033, 0x00000069,
-               0x03F, 0x00000DF4,
+               0x03F, 0x00000DD4,
                0x033, 0x0000006A,
-               0x03F, 0x00000DF7,
+               0x03F, 0x00000DD7,
        0x94000003,     0x00000000,     0x40000000,     0x00000000,
                0x033, 0x00000060,
                0x03F, 0x00000467,
@@ -19840,21 +19840,21 @@ static const u32 rtw8822c_rf_a[] = {
                0x033, 0x00000062,
                0x03F, 0x00000908,
                0x033, 0x00000063,
-               0x03F, 0x00000D09,
+               0x03F, 0x000008C8,
                0x033, 0x00000064,
-               0x03F, 0x00000D49,
+               0x03F, 0x000008CB,
                0x033, 0x00000065,
-               0x03F, 0x00000D8A,
+               0x03F, 0x000008CE,
                0x033, 0x00000066,
-               0x03F, 0x00000DEB,
+               0x03F, 0x000008D1,
                0x033, 0x00000067,
-               0x03F, 0x00000DEE,
+               0x03F, 0x000008D4,
                0x033, 0x00000068,
-               0x03F, 0x00000DF1,
+               0x03F, 0x00000DD1,
                0x033, 0x00000069,
-               0x03F, 0x00000DF4,
+               0x03F, 0x00000DD4,
                0x033, 0x0000006A,
-               0x03F, 0x00000DF7,
+               0x03F, 0x00000DD7,
        0x94000004,     0x00000000,     0x40000000,     0x00000000,
                0x033, 0x00000060,
                0x03F, 0x00000467,
@@ -19863,21 +19863,21 @@ static const u32 rtw8822c_rf_a[] = {
                0x033, 0x00000062,
                0x03F, 0x00000908,
                0x033, 0x00000063,
-               0x03F, 0x00000D09,
+               0x03F, 0x000008C8,
                0x033, 0x00000064,
-               0x03F, 0x00000D49,
+               0x03F, 0x000008CB,
                0x033, 0x00000065,
-               0x03F, 0x00000D8A,
+               0x03F, 0x000008CE,
                0x033, 0x00000066,
-               0x03F, 0x00000DEB,
+               0x03F, 0x000008D1,
                0x033, 0x00000067,
-               0x03F, 0x00000DEE,
+               0x03F, 0x000008D4,
                0x033, 0x00000068,
-               0x03F, 0x00000DF1,
+               0x03F, 0x00000DD1,
                0x033, 0x00000069,
-               0x03F, 0x00000DF4,
+               0x03F, 0x00000DD4,
                0x033, 0x0000006A,
-               0x03F, 0x00000DF7,
+               0x03F, 0x00000DD7,
        0x94000005,     0x00000000,     0x40000000,     0x00000000,
                0x033, 0x00000060,
                0x03F, 0x00000467,
@@ -19886,21 +19886,21 @@ static const u32 rtw8822c_rf_a[] = {
                0x033, 0x00000062,
                0x03F, 0x00000908,
                0x033, 0x00000063,
-               0x03F, 0x00000D09,
+               0x03F, 0x000008C8,
                0x033, 0x00000064,
-               0x03F, 0x00000D49,
+               0x03F, 0x000008CB,
                0x033, 0x00000065,
-               0x03F, 0x00000D8A,
+               0x03F, 0x000008CE,
                0x033, 0x00000066,
-               0x03F, 0x00000DEB,
+               0x03F, 0x000008D1,
                0x033, 0x00000067,
-               0x03F, 0x00000DEE,
+               0x03F, 0x000008D4,
                0x033, 0x00000068,
-               0x03F, 0x00000DF1,
+               0x03F, 0x00000DD1,
                0x033, 0x00000069,
-               0x03F, 0x00000DF4,
+               0x03F, 0x00000DD4,
                0x033, 0x0000006A,
-               0x03F, 0x00000DF7,
+               0x03F, 0x00000DD7,
        0x94000006,     0x00000000,     0x40000000,     0x00000000,
                0x033, 0x00000060,
                0x03F, 0x00000467,
@@ -19909,21 +19909,21 @@ static const u32 rtw8822c_rf_a[] = {
                0x033, 0x00000062,
                0x03F, 0x00000908,
                0x033, 0x00000063,
-               0x03F, 0x00000D09,
+               0x03F, 0x000008C8,
                0x033, 0x00000064,
-               0x03F, 0x00000D49,
+               0x03F, 0x000008CB,
                0x033, 0x00000065,
-               0x03F, 0x00000D8A,
+               0x03F, 0x000008CE,
                0x033, 0x00000066,
-               0x03F, 0x00000DEB,
+               0x03F, 0x000008D1,
                0x033, 0x00000067,
-               0x03F, 0x00000DEE,
+               0x03F, 0x000008D4,
                0x033, 0x00000068,
-               0x03F, 0x00000DF1,
+               0x03F, 0x00000DD1,
                0x033, 0x00000069,
-               0x03F, 0x00000DF4,
+               0x03F, 0x00000DD4,
                0x033, 0x0000006A,
-               0x03F, 0x00000DF7,
+               0x03F, 0x00000DD7,
        0x94000015,     0x00000000,     0x40000000,     0x00000000,
                0x033, 0x00000060,
                0x03F, 0x00000467,
@@ -19932,21 +19932,21 @@ static const u32 rtw8822c_rf_a[] = {
                0x033, 0x00000062,
                0x03F, 0x00000908,
                0x033, 0x00000063,
-               0x03F, 0x00000D09,
+               0x03F, 0x000008C8,
                0x033, 0x00000064,
-               0x03F, 0x00000D49,
+               0x03F, 0x000008CB,
                0x033, 0x00000065,
-               0x03F, 0x00000D8A,
+               0x03F, 0x000008CE,
                0x033, 0x00000066,
-               0x03F, 0x00000DEB,
+               0x03F, 0x000008D1,
                0x033, 0x00000067,
-               0x03F, 0x00000DEE,
+               0x03F, 0x000008D4,
                0x033, 0x00000068,
-               0x03F, 0x00000DF1,
+               0x03F, 0x00000DD1,
                0x033, 0x00000069,
-               0x03F, 0x00000DF4,
+               0x03F, 0x00000DD4,
                0x033, 0x0000006A,
-               0x03F, 0x00000DF7,
+               0x03F, 0x00000DD7,
        0x94000016,     0x00000000,     0x40000000,     0x00000000,
                0x033, 0x00000060,
                0x03F, 0x00000467,
@@ -19955,21 +19955,21 @@ static const u32 rtw8822c_rf_a[] = {
                0x033, 0x00000062,
                0x03F, 0x00000908,
                0x033, 0x00000063,
-               0x03F, 0x00000D09,
+               0x03F, 0x000008C8,
                0x033, 0x00000064,
-               0x03F, 0x00000D49,
+               0x03F, 0x000008CB,
                0x033, 0x00000065,
-               0x03F, 0x00000D8A,
+               0x03F, 0x000008CE,
                0x033, 0x00000066,
-               0x03F, 0x00000DEB,
+               0x03F, 0x000008D1,
                0x033, 0x00000067,
-               0x03F, 0x00000DEE,
+               0x03F, 0x000008D4,
                0x033, 0x00000068,
-               0x03F, 0x00000DF1,
+               0x03F, 0x00000DD1,
                0x033, 0x00000069,
-               0x03F, 0x00000DF4,
+               0x03F, 0x00000DD4,
                0x033, 0x0000006A,
-               0x03F, 0x00000DF7,
+               0x03F, 0x00000DD7,
        0x95000001,     0x00000000,     0x40000000,     0x00000000,
                0x033, 0x00000060,
                0x03F, 0x00000467,
@@ -19978,21 +19978,21 @@ static const u32 rtw8822c_rf_a[] = {
                0x033, 0x00000062,
                0x03F, 0x00000908,
                0x033, 0x00000063,
-               0x03F, 0x00000D09,
+               0x03F, 0x000008C8,
                0x033, 0x00000064,
-               0x03F, 0x00000D49,
+               0x03F, 0x000008CB,
                0x033, 0x00000065,
-               0x03F, 0x00000D8A,
+               0x03F, 0x000008CE,
                0x033, 0x00000066,
-               0x03F, 0x00000DEB,
+               0x03F, 0x000008D1,
                0x033, 0x00000067,
-               0x03F, 0x00000DEE,
+               0x03F, 0x000008D4,
                0x033, 0x00000068,
-               0x03F, 0x00000DF1,
+               0x03F, 0x00000DD1,
                0x033, 0x00000069,
-               0x03F, 0x00000DF4,
+               0x03F, 0x00000DD4,
                0x033, 0x0000006A,
-               0x03F, 0x00000DF7,
+               0x03F, 0x00000DD7,
        0x95000002,     0x00000000,     0x40000000,     0x00000000,
                0x033, 0x00000060,
                0x03F, 0x00000467,
@@ -20001,21 +20001,21 @@ static const u32 rtw8822c_rf_a[] = {
                0x033, 0x00000062,
                0x03F, 0x00000908,
                0x033, 0x00000063,
-               0x03F, 0x00000D09,
+               0x03F, 0x000008C8,
                0x033, 0x00000064,
-               0x03F, 0x00000D49,
+               0x03F, 0x000008CB,
                0x033, 0x00000065,
-               0x03F, 0x00000D8A,
+               0x03F, 0x000008CE,
                0x033, 0x00000066,
-               0x03F, 0x00000DEB,
+               0x03F, 0x000008D1,
                0x033, 0x00000067,
-               0x03F, 0x00000DEE,
+               0x03F, 0x000008D4,
                0x033, 0x00000068,
-               0x03F, 0x00000DF1,
+               0x03F, 0x00000DD1,
                0x033, 0x00000069,
-               0x03F, 0x00000DF4,
+               0x03F, 0x00000DD4,
                0x033, 0x0000006A,
-               0x03F, 0x00000DF7,
+               0x03F, 0x00000DD7,
        0x95000003,     0x00000000,     0x40000000,     0x00000000,
                0x033, 0x00000060,
                0x03F, 0x00000467,
@@ -20024,21 +20024,21 @@ static const u32 rtw8822c_rf_a[] = {
                0x033, 0x00000062,
                0x03F, 0x00000908,
                0x033, 0x00000063,
-               0x03F, 0x00000D09,
+               0x03F, 0x000008C8,
                0x033, 0x00000064,
-               0x03F, 0x00000D49,
+               0x03F, 0x000008CB,
                0x033, 0x00000065,
-               0x03F, 0x00000D8A,
+               0x03F, 0x000008CE,
                0x033, 0x00000066,
-               0x03F, 0x00000DEB,
+               0x03F, 0x000008D1,
                0x033, 0x00000067,
-               0x03F, 0x00000DEE,
+               0x03F, 0x000008D4,
                0x033, 0x00000068,
-               0x03F, 0x00000DF1,
+               0x03F, 0x00000DD1,
                0x033, 0x00000069,
-               0x03F, 0x00000DF4,
+               0x03F, 0x00000DD4,
                0x033, 0x0000006A,
-               0x03F, 0x00000DF7,
+               0x03F, 0x00000DD7,
        0x95000004,     0x00000000,     0x40000000,     0x00000000,
                0x033, 0x00000060,
                0x03F, 0x00000467,
@@ -20047,21 +20047,21 @@ static const u32 rtw8822c_rf_a[] = {
                0x033, 0x00000062,
                0x03F, 0x00000908,
                0x033, 0x00000063,
-               0x03F, 0x00000D09,
+               0x03F, 0x000008C8,
                0x033, 0x00000064,
-               0x03F, 0x00000D49,
+               0x03F, 0x000008CB,
                0x033, 0x00000065,
-               0x03F, 0x00000D8A,
+               0x03F, 0x000008CE,
                0x033, 0x00000066,
-               0x03F, 0x00000DEB,
+               0x03F, 0x000008D1,
                0x033, 0x00000067,
-               0x03F, 0x00000DEE,
+               0x03F, 0x000008D4,
                0x033, 0x00000068,
-               0x03F, 0x00000DF1,
+               0x03F, 0x00000DD1,
                0x033, 0x00000069,
-               0x03F, 0x00000DF4,
+               0x03F, 0x00000DD4,
                0x033, 0x0000006A,
-               0x03F, 0x00000DF7,
+               0x03F, 0x00000DD7,
        0x95000005,     0x00000000,     0x40000000,     0x00000000,
                0x033, 0x00000060,
                0x03F, 0x00000467,
@@ -20070,21 +20070,21 @@ static const u32 rtw8822c_rf_a[] = {
                0x033, 0x00000062,
                0x03F, 0x00000908,
                0x033, 0x00000063,
-               0x03F, 0x00000D09,
+               0x03F, 0x000008C8,
                0x033, 0x00000064,
-               0x03F, 0x00000D49,
+               0x03F, 0x000008CB,
                0x033, 0x00000065,
-               0x03F, 0x00000D8A,
+               0x03F, 0x000008CE,
                0x033, 0x00000066,
-               0x03F, 0x00000DEB,
+               0x03F, 0x000008D1,
                0x033, 0x00000067,
-               0x03F, 0x00000DEE,
+               0x03F, 0x000008D4,
                0x033, 0x00000068,
-               0x03F, 0x00000DF1,
+               0x03F, 0x00000DD1,
                0x033, 0x00000069,
-               0x03F, 0x00000DF4,
+               0x03F, 0x00000DD4,
                0x033, 0x0000006A,
-               0x03F, 0x00000DF7,
+               0x03F, 0x00000DD7,
        0x95000006,     0x00000000,     0x40000000,     0x00000000,
                0x033, 0x00000060,
                0x03F, 0x00000467,
@@ -20093,21 +20093,21 @@ static const u32 rtw8822c_rf_a[] = {
                0x033, 0x00000062,
                0x03F, 0x00000908,
                0x033, 0x00000063,
-               0x03F, 0x00000D09,
+               0x03F, 0x000008C8,
                0x033, 0x00000064,
-               0x03F, 0x00000D49,
+               0x03F, 0x000008CB,
                0x033, 0x00000065,
-               0x03F, 0x00000D8A,
+               0x03F, 0x000008CE,
                0x033, 0x00000066,
-               0x03F, 0x00000DEB,
+               0x03F, 0x000008D1,
                0x033, 0x00000067,
-               0x03F, 0x00000DEE,
+               0x03F, 0x000008D4,
                0x033, 0x00000068,
-               0x03F, 0x00000DF1,
+               0x03F, 0x00000DD1,
                0x033, 0x00000069,
-               0x03F, 0x00000DF4,
+               0x03F, 0x00000DD4,
                0x033, 0x0000006A,
-               0x03F, 0x00000DF7,
+               0x03F, 0x00000DD7,
        0x95000015,     0x00000000,     0x40000000,     0x00000000,
                0x033, 0x00000060,
                0x03F, 0x00000467,
@@ -20116,21 +20116,21 @@ static const u32 rtw8822c_rf_a[] = {
                0x033, 0x00000062,
                0x03F, 0x00000908,
                0x033, 0x00000063,
-               0x03F, 0x00000D09,
+               0x03F, 0x000008C8,
                0x033, 0x00000064,
-               0x03F, 0x00000D49,
+               0x03F, 0x000008CB,
                0x033, 0x00000065,
-               0x03F, 0x00000D8A,
+               0x03F, 0x000008CE,
                0x033, 0x00000066,
-               0x03F, 0x00000DEB,
+               0x03F, 0x000008D1,
                0x033, 0x00000067,
-               0x03F, 0x00000DEE,
+               0x03F, 0x000008D4,
                0x033, 0x00000068,
-               0x03F, 0x00000DF1,
+               0x03F, 0x00000DD1,
                0x033, 0x00000069,
-               0x03F, 0x00000DF4,
+               0x03F, 0x00000DD4,
                0x033, 0x0000006A,
-               0x03F, 0x00000DF7,
+               0x03F, 0x00000DD7,
        0x95000016,     0x00000000,     0x40000000,     0x00000000,
                0x033, 0x00000060,
                0x03F, 0x00000467,
@@ -20139,21 +20139,21 @@ static const u32 rtw8822c_rf_a[] = {
                0x033, 0x00000062,
                0x03F, 0x00000908,
                0x033, 0x00000063,
-               0x03F, 0x00000D09,
+               0x03F, 0x000008C8,
                0x033, 0x00000064,
-               0x03F, 0x00000D49,
+               0x03F, 0x000008CB,
                0x033, 0x00000065,
-               0x03F, 0x00000D8A,
+               0x03F, 0x000008CE,
                0x033, 0x00000066,
-               0x03F, 0x00000DEB,
+               0x03F, 0x000008D1,
                0x033, 0x00000067,
-               0x03F, 0x00000DEE,
+               0x03F, 0x000008D4,
                0x033, 0x00000068,
-               0x03F, 0x00000DF1,
+               0x03F, 0x00000DD1,
                0x033, 0x00000069,
-               0x03F, 0x00000DF4,
+               0x03F, 0x00000DD4,
                0x033, 0x0000006A,
-               0x03F, 0x00000DF7,
+               0x03F, 0x00000DD7,
        0xA0000000,     0x00000000,
                0x033, 0x00000060,
                0x03F, 0x00000487,
@@ -38484,21 +38484,21 @@ static const u32 rtw8822c_rf_b[] = {
                0x033, 0x00000062,
                0x03F, 0x00000908,
                0x033, 0x00000063,
-               0x03F, 0x00000D09,
+               0x03F, 0x00000CC6,
                0x033, 0x00000064,
-               0x03F, 0x00000D49,
+               0x03F, 0x00000CC9,
                0x033, 0x00000065,
-               0x03F, 0x00000D8A,
+               0x03F, 0x00000CCC,
                0x033, 0x00000066,
-               0x03F, 0x00000DEB,
+               0x03F, 0x00000CCF,
                0x033, 0x00000067,
-               0x03F, 0x00000DEE,
+               0x03F, 0x00000CD2,
                0x033, 0x00000068,
-               0x03F, 0x00000DF1,
+               0x03F, 0x00000CD5,
                0x033, 0x00000069,
-               0x03F, 0x00000DF4,
+               0x03F, 0x00000DD4,
                0x033, 0x0000006A,
-               0x03F, 0x00000DF7,
+               0x03F, 0x00000DD7,
        0x93000002,     0x00000000,     0x40000000,     0x00000000,
                0x033, 0x00000060,
                0x03F, 0x00000467,
@@ -38507,21 +38507,21 @@ static const u32 rtw8822c_rf_b[] = {
                0x033, 0x00000062,
                0x03F, 0x00000908,
                0x033, 0x00000063,
-               0x03F, 0x00000D09,
+               0x03F, 0x00000CC6,
                0x033, 0x00000064,
-               0x03F, 0x00000D49,
+               0x03F, 0x00000CC9,
                0x033, 0x00000065,
-               0x03F, 0x00000D8A,
+               0x03F, 0x00000CCC,
                0x033, 0x00000066,
-               0x03F, 0x00000DEB,
+               0x03F, 0x00000CCF,
                0x033, 0x00000067,
-               0x03F, 0x00000DEE,
+               0x03F, 0x00000CD2,
                0x033, 0x00000068,
-               0x03F, 0x00000DF1,
+               0x03F, 0x00000CD5,
                0x033, 0x00000069,
-               0x03F, 0x00000DF4,
+               0x03F, 0x00000DD4,
                0x033, 0x0000006A,
-               0x03F, 0x00000DF7,
+               0x03F, 0x00000DD7,
        0x93000003,     0x00000000,     0x40000000,     0x00000000,
                0x033, 0x00000060,
                0x03F, 0x00000467,
@@ -38530,21 +38530,21 @@ static const u32 rtw8822c_rf_b[] = {
                0x033, 0x00000062,
                0x03F, 0x00000908,
                0x033, 0x00000063,
-               0x03F, 0x00000D09,
+               0x03F, 0x00000CC6,
                0x033, 0x00000064,
-               0x03F, 0x00000D49,
+               0x03F, 0x00000CC9,
                0x033, 0x00000065,
-               0x03F, 0x00000D8A,
+               0x03F, 0x00000CCC,
                0x033, 0x00000066,
-               0x03F, 0x00000DEB,
+               0x03F, 0x00000CCF,
                0x033, 0x00000067,
-               0x03F, 0x00000DEE,
+               0x03F, 0x00000CD2,
                0x033, 0x00000068,
-               0x03F, 0x00000DF1,
+               0x03F, 0x00000CD5,
                0x033, 0x00000069,
-               0x03F, 0x00000DF4,
+               0x03F, 0x00000DD4,
                0x033, 0x0000006A,
-               0x03F, 0x00000DF7,
+               0x03F, 0x00000DD7,
        0x93000004,     0x00000000,     0x40000000,     0x00000000,
                0x033, 0x00000060,
                0x03F, 0x00000467,
@@ -38553,21 +38553,21 @@ static const u32 rtw8822c_rf_b[] = {
                0x033, 0x00000062,
                0x03F, 0x00000908,
                0x033, 0x00000063,
-               0x03F, 0x00000D09,
+               0x03F, 0x00000CC6,
                0x033, 0x00000064,
-               0x03F, 0x00000D49,
+               0x03F, 0x00000CC9,
                0x033, 0x00000065,
-               0x03F, 0x00000D8A,
+               0x03F, 0x00000CCC,
                0x033, 0x00000066,
-               0x03F, 0x00000DEB,
+               0x03F, 0x00000CCF,
                0x033, 0x00000067,
-               0x03F, 0x00000DEE,
+               0x03F, 0x00000CD2,
                0x033, 0x00000068,
-               0x03F, 0x00000DF1,
+               0x03F, 0x00000CD5,
                0x033, 0x00000069,
-               0x03F, 0x00000DF4,
+               0x03F, 0x00000DD4,
                0x033, 0x0000006A,
-               0x03F, 0x00000DF7,
+               0x03F, 0x00000DD7,
        0x93000005,     0x00000000,     0x40000000,     0x00000000,
                0x033, 0x00000060,
                0x03F, 0x00000467,
@@ -38576,21 +38576,21 @@ static const u32 rtw8822c_rf_b[] = {
                0x033, 0x00000062,
                0x03F, 0x00000908,
                0x033, 0x00000063,
-               0x03F, 0x00000D09,
+               0x03F, 0x00000CC6,
                0x033, 0x00000064,
-               0x03F, 0x00000D49,
+               0x03F, 0x00000CC9,
                0x033, 0x00000065,
-               0x03F, 0x00000D8A,
+               0x03F, 0x00000CCC,
                0x033, 0x00000066,
-               0x03F, 0x00000DEB,
+               0x03F, 0x00000CCF,
                0x033, 0x00000067,
-               0x03F, 0x00000DEE,
+               0x03F, 0x00000CD2,
                0x033, 0x00000068,
-               0x03F, 0x00000DF1,
+               0x03F, 0x00000CD5,
                0x033, 0x00000069,
-               0x03F, 0x00000DF4,
+               0x03F, 0x00000DD4,
                0x033, 0x0000006A,
-               0x03F, 0x00000DF7,
+               0x03F, 0x00000DD7,
        0x93000006,     0x00000000,     0x40000000,     0x00000000,
                0x033, 0x00000060,
                0x03F, 0x00000467,
@@ -38599,21 +38599,21 @@ static const u32 rtw8822c_rf_b[] = {
                0x033, 0x00000062,
                0x03F, 0x00000908,
                0x033, 0x00000063,
-               0x03F, 0x00000D09,
+               0x03F, 0x00000CC6,
                0x033, 0x00000064,
-               0x03F, 0x00000D49,
+               0x03F, 0x00000CC9,
                0x033, 0x00000065,
-               0x03F, 0x00000D8A,
+               0x03F, 0x00000CCC,
                0x033, 0x00000066,
-               0x03F, 0x00000DEB,
+               0x03F, 0x00000CCF,
                0x033, 0x00000067,
-               0x03F, 0x00000DEE,
+               0x03F, 0x00000CD2,
                0x033, 0x00000068,
-               0x03F, 0x00000DF1,
+               0x03F, 0x00000CD5,
                0x033, 0x00000069,
-               0x03F, 0x00000DF4,
+               0x03F, 0x00000DD4,
                0x033, 0x0000006A,
-               0x03F, 0x00000DF7,
+               0x03F, 0x00000DD7,
        0x93000015,     0x00000000,     0x40000000,     0x00000000,
                0x033, 0x00000060,
                0x03F, 0x00000467,
@@ -38622,21 +38622,21 @@ static const u32 rtw8822c_rf_b[] = {
                0x033, 0x00000062,
                0x03F, 0x00000908,
                0x033, 0x00000063,
-               0x03F, 0x00000D09,
+               0x03F, 0x00000CC6,
                0x033, 0x00000064,
-               0x03F, 0x00000D49,
+               0x03F, 0x00000CC9,
                0x033, 0x00000065,
-               0x03F, 0x00000D8A,
+               0x03F, 0x00000CCC,
                0x033, 0x00000066,
-               0x03F, 0x00000DEB,
+               0x03F, 0x00000CCF,
                0x033, 0x00000067,
-               0x03F, 0x00000DEE,
+               0x03F, 0x00000CD2,
                0x033, 0x00000068,
-               0x03F, 0x00000DF1,
+               0x03F, 0x00000CD5,
                0x033, 0x00000069,
-               0x03F, 0x00000DF4,
+               0x03F, 0x00000DD4,
                0x033, 0x0000006A,
-               0x03F, 0x00000DF7,
+               0x03F, 0x00000DD7,
        0x93000016,     0x00000000,     0x40000000,     0x00000000,
                0x033, 0x00000060,
                0x03F, 0x00000467,
@@ -38645,21 +38645,21 @@ static const u32 rtw8822c_rf_b[] = {
                0x033, 0x00000062,
                0x03F, 0x00000908,
                0x033, 0x00000063,
-               0x03F, 0x00000D09,
+               0x03F, 0x00000CC6,
                0x033, 0x00000064,
-               0x03F, 0x00000D49,
+               0x03F, 0x00000CC9,
                0x033, 0x00000065,
-               0x03F, 0x00000D8A,
+               0x03F, 0x00000CCC,
                0x033, 0x00000066,
-               0x03F, 0x00000DEB,
+               0x03F, 0x00000CCF,
                0x033, 0x00000067,
-               0x03F, 0x00000DEE,
+               0x03F, 0x00000CD2,
                0x033, 0x00000068,
-               0x03F, 0x00000DF1,
+               0x03F, 0x00000CD5,
                0x033, 0x00000069,
-               0x03F, 0x00000DF4,
+               0x03F, 0x00000DD4,
                0x033, 0x0000006A,
-               0x03F, 0x00000DF7,
+               0x03F, 0x00000DD7,
        0x94000001,     0x00000000,     0x40000000,     0x00000000,
                0x033, 0x00000060,
                0x03F, 0x00000467,
@@ -38668,21 +38668,21 @@ static const u32 rtw8822c_rf_b[] = {
                0x033, 0x00000062,
                0x03F, 0x00000908,
                0x033, 0x00000063,
-               0x03F, 0x00000D09,
+               0x03F, 0x00000CC6,
                0x033, 0x00000064,
-               0x03F, 0x00000D49,
+               0x03F, 0x00000CC9,
                0x033, 0x00000065,
-               0x03F, 0x00000D8A,
+               0x03F, 0x00000CCC,
                0x033, 0x00000066,
-               0x03F, 0x00000DEB,
+               0x03F, 0x00000CCF,
                0x033, 0x00000067,
-               0x03F, 0x00000DEE,
+               0x03F, 0x00000CD2,
                0x033, 0x00000068,
-               0x03F, 0x00000DF1,
+               0x03F, 0x00000CD5,
                0x033, 0x00000069,
-               0x03F, 0x00000DF4,
+               0x03F, 0x00000DD4,
                0x033, 0x0000006A,
-               0x03F, 0x00000DF7,
+               0x03F, 0x00000DD7,
        0x94000002,     0x00000000,     0x40000000,     0x00000000,
                0x033, 0x00000060,
                0x03F, 0x00000467,
@@ -38691,21 +38691,21 @@ static const u32 rtw8822c_rf_b[] = {
                0x033, 0x00000062,
                0x03F, 0x00000908,
                0x033, 0x00000063,
-               0x03F, 0x00000D09,
+               0x03F, 0x00000CC6,
                0x033, 0x00000064,
-               0x03F, 0x00000D49,
+               0x03F, 0x00000CC9,
                0x033, 0x00000065,
-               0x03F, 0x00000D8A,
+               0x03F, 0x00000CCC,
                0x033, 0x00000066,
-               0x03F, 0x00000DEB,
+               0x03F, 0x00000CCF,
                0x033, 0x00000067,
-               0x03F, 0x00000DEE,
+               0x03F, 0x00000CD2,
                0x033, 0x00000068,
-               0x03F, 0x00000DF1,
+               0x03F, 0x00000CD5,
                0x033, 0x00000069,
-               0x03F, 0x00000DF4,
+               0x03F, 0x00000DD4,
                0x033, 0x0000006A,
-               0x03F, 0x00000DF7,
+               0x03F, 0x00000DD7,
        0x94000003,     0x00000000,     0x40000000,     0x00000000,
                0x033, 0x00000060,
                0x03F, 0x00000467,
@@ -38714,21 +38714,21 @@ static const u32 rtw8822c_rf_b[] = {
                0x033, 0x00000062,
                0x03F, 0x00000908,
                0x033, 0x00000063,
-               0x03F, 0x00000D09,
+               0x03F, 0x00000CC6,
                0x033, 0x00000064,
-               0x03F, 0x00000D49,
+               0x03F, 0x00000CC9,
                0x033, 0x00000065,
-               0x03F, 0x00000D8A,
+               0x03F, 0x00000CCC,
                0x033, 0x00000066,
-               0x03F, 0x00000DEB,
+               0x03F, 0x00000CCF,
                0x033, 0x00000067,
-               0x03F, 0x00000DEE,
+               0x03F, 0x00000CD2,
                0x033, 0x00000068,
-               0x03F, 0x00000DF1,
+               0x03F, 0x00000CD5,
                0x033, 0x00000069,
-               0x03F, 0x00000DF4,
+               0x03F, 0x00000DD4,
                0x033, 0x0000006A,
-               0x03F, 0x00000DF7,
+               0x03F, 0x00000DD7,
        0x94000004,     0x00000000,     0x40000000,     0x00000000,
                0x033, 0x00000060,
                0x03F, 0x00000467,
@@ -38737,21 +38737,21 @@ static const u32 rtw8822c_rf_b[] = {
                0x033, 0x00000062,
                0x03F, 0x00000908,
                0x033, 0x00000063,
-               0x03F, 0x00000D09,
+               0x03F, 0x00000CC6,
                0x033, 0x00000064,
-               0x03F, 0x00000D49,
+               0x03F, 0x00000CC9,
                0x033, 0x00000065,
-               0x03F, 0x00000D8A,
+               0x03F, 0x00000CCC,
                0x033, 0x00000066,
-               0x03F, 0x00000DEB,
+               0x03F, 0x00000CCF,
                0x033, 0x00000067,
-               0x03F, 0x00000DEE,
+               0x03F, 0x00000CD2,
                0x033, 0x00000068,
-               0x03F, 0x00000DF1,
+               0x03F, 0x00000CD5,
                0x033, 0x00000069,
-               0x03F, 0x00000DF4,
+               0x03F, 0x00000DD4,
                0x033, 0x0000006A,
-               0x03F, 0x00000DF7,
+               0x03F, 0x00000DD7,
        0x94000005,     0x00000000,     0x40000000,     0x00000000,
                0x033, 0x00000060,
                0x03F, 0x00000467,
@@ -38760,21 +38760,21 @@ static const u32 rtw8822c_rf_b[] = {
                0x033, 0x00000062,
                0x03F, 0x00000908,
                0x033, 0x00000063,
-               0x03F, 0x00000D09,
+               0x03F, 0x00000CC6,
                0x033, 0x00000064,
-               0x03F, 0x00000D49,
+               0x03F, 0x00000CC9,
                0x033, 0x00000065,
-               0x03F, 0x00000D8A,
+               0x03F, 0x00000CCC,
                0x033, 0x00000066,
-               0x03F, 0x00000DEB,
+               0x03F, 0x00000CCF,
                0x033, 0x00000067,
-               0x03F, 0x00000DEE,
+               0x03F, 0x00000CD2,
                0x033, 0x00000068,
-               0x03F, 0x00000DF1,
+               0x03F, 0x00000CD5,
                0x033, 0x00000069,
-               0x03F, 0x00000DF4,
+               0x03F, 0x00000DD4,
                0x033, 0x0000006A,
-               0x03F, 0x00000DF7,
+               0x03F, 0x00000DD7,
        0x94000006,     0x00000000,     0x40000000,     0x00000000,
                0x033, 0x00000060,
                0x03F, 0x00000467,
@@ -38783,21 +38783,21 @@ static const u32 rtw8822c_rf_b[] = {
                0x033, 0x00000062,
                0x03F, 0x00000908,
                0x033, 0x00000063,
-               0x03F, 0x00000D09,
+               0x03F, 0x00000CC6,
                0x033, 0x00000064,
-               0x03F, 0x00000D49,
+               0x03F, 0x00000CC9,
                0x033, 0x00000065,
-               0x03F, 0x00000D8A,
+               0x03F, 0x00000CCC,
                0x033, 0x00000066,
-               0x03F, 0x00000DEB,
+               0x03F, 0x00000CCF,
                0x033, 0x00000067,
-               0x03F, 0x00000DEE,
+               0x03F, 0x00000CD2,
                0x033, 0x00000068,
-               0x03F, 0x00000DF1,
+               0x03F, 0x00000CD5,
                0x033, 0x00000069,
-               0x03F, 0x00000DF4,
+               0x03F, 0x00000DD4,
                0x033, 0x0000006A,
-               0x03F, 0x00000DF7,
+               0x03F, 0x00000DD7,
        0x94000015,     0x00000000,     0x40000000,     0x00000000,
                0x033, 0x00000060,
                0x03F, 0x00000467,
@@ -38806,21 +38806,21 @@ static const u32 rtw8822c_rf_b[] = {
                0x033, 0x00000062,
                0x03F, 0x00000908,
                0x033, 0x00000063,
-               0x03F, 0x00000D09,
+               0x03F, 0x00000CC6,
                0x033, 0x00000064,
-               0x03F, 0x00000D49,
+               0x03F, 0x00000CC9,
                0x033, 0x00000065,
-               0x03F, 0x00000D8A,
+               0x03F, 0x00000CCC,
                0x033, 0x00000066,
-               0x03F, 0x00000DEB,
+               0x03F, 0x00000CCF,
                0x033, 0x00000067,
-               0x03F, 0x00000DEE,
+               0x03F, 0x00000CD2,
                0x033, 0x00000068,
-               0x03F, 0x00000DF1,
+               0x03F, 0x00000CD5,
                0x033, 0x00000069,
-               0x03F, 0x00000DF4,
+               0x03F, 0x00000DD4,
                0x033, 0x0000006A,
-               0x03F, 0x00000DF7,
+               0x03F, 0x00000DD7,
        0x94000016,     0x00000000,     0x40000000,     0x00000000,
                0x033, 0x00000060,
                0x03F, 0x00000467,
@@ -38829,21 +38829,21 @@ static const u32 rtw8822c_rf_b[] = {
                0x033, 0x00000062,
                0x03F, 0x00000908,
                0x033, 0x00000063,
-               0x03F, 0x00000D09,
+               0x03F, 0x00000CC6,
                0x033, 0x00000064,
-               0x03F, 0x00000D49,
+               0x03F, 0x00000CC9,
                0x033, 0x00000065,
-               0x03F, 0x00000D8A,
+               0x03F, 0x00000CCC,
                0x033, 0x00000066,
-               0x03F, 0x00000DEB,
+               0x03F, 0x00000CCF,
                0x033, 0x00000067,
-               0x03F, 0x00000DEE,
+               0x03F, 0x00000CD2,
                0x033, 0x00000068,
-               0x03F, 0x00000DF1,
+               0x03F, 0x00000CD5,
                0x033, 0x00000069,
-               0x03F, 0x00000DF4,
+               0x03F, 0x00000DD4,
                0x033, 0x0000006A,
-               0x03F, 0x00000DF7,
+               0x03F, 0x00000DD7,
        0x95000001,     0x00000000,     0x40000000,     0x00000000,
                0x033, 0x00000060,
                0x03F, 0x00000467,
@@ -38852,21 +38852,21 @@ static const u32 rtw8822c_rf_b[] = {
                0x033, 0x00000062,
                0x03F, 0x00000908,
                0x033, 0x00000063,
-               0x03F, 0x00000D09,
+               0x03F, 0x00000CC6,
                0x033, 0x00000064,
-               0x03F, 0x00000D49,
+               0x03F, 0x00000CC9,
                0x033, 0x00000065,
-               0x03F, 0x00000D8A,
+               0x03F, 0x00000CCC,
                0x033, 0x00000066,
-               0x03F, 0x00000DEB,
+               0x03F, 0x00000CCF,
                0x033, 0x00000067,
-               0x03F, 0x00000DEE,
+               0x03F, 0x00000CD2,
                0x033, 0x00000068,
-               0x03F, 0x00000DF1,
+               0x03F, 0x00000CD5,
                0x033, 0x00000069,
-               0x03F, 0x00000DF4,
+               0x03F, 0x00000DD4,
                0x033, 0x0000006A,
-               0x03F, 0x00000DF7,
+               0x03F, 0x00000DD7,
        0x95000002,     0x00000000,     0x40000000,     0x00000000,
                0x033, 0x00000060,
                0x03F, 0x00000467,
@@ -38875,21 +38875,21 @@ static const u32 rtw8822c_rf_b[] = {
                0x033, 0x00000062,
                0x03F, 0x00000908,
                0x033, 0x00000063,
-               0x03F, 0x00000D09,
+               0x03F, 0x00000CC6,
                0x033, 0x00000064,
-               0x03F, 0x00000D49,
+               0x03F, 0x00000CC9,
                0x033, 0x00000065,
-               0x03F, 0x00000D8A,
+               0x03F, 0x00000CCC,
                0x033, 0x00000066,
-               0x03F, 0x00000DEB,
+               0x03F, 0x00000CCF,
                0x033, 0x00000067,
-               0x03F, 0x00000DEE,
+               0x03F, 0x00000CD2,
                0x033, 0x00000068,
-               0x03F, 0x00000DF1,
+               0x03F, 0x00000CD5,
                0x033, 0x00000069,
-               0x03F, 0x00000DF4,
+               0x03F, 0x00000DD4,
                0x033, 0x0000006A,
-               0x03F, 0x00000DF7,
+               0x03F, 0x00000DD7,
        0x95000003,     0x00000000,     0x40000000,     0x00000000,
                0x033, 0x00000060,
                0x03F, 0x00000467,
@@ -38898,21 +38898,21 @@ static const u32 rtw8822c_rf_b[] = {
                0x033, 0x00000062,
                0x03F, 0x00000908,
                0x033, 0x00000063,
-               0x03F, 0x00000D09,
+               0x03F, 0x00000CC6,
                0x033, 0x00000064,
-               0x03F, 0x00000D49,
+               0x03F, 0x00000CC9,
                0x033, 0x00000065,
-               0x03F, 0x00000D8A,
+               0x03F, 0x00000CCC,
                0x033, 0x00000066,
-               0x03F, 0x00000DEB,
+               0x03F, 0x00000CCF,
                0x033, 0x00000067,
-               0x03F, 0x00000DEE,
+               0x03F, 0x00000CD2,
                0x033, 0x00000068,
-               0x03F, 0x00000DF1,
+               0x03F, 0x00000CD5,
                0x033, 0x00000069,
-               0x03F, 0x00000DF4,
+               0x03F, 0x00000DD4,
                0x033, 0x0000006A,
-               0x03F, 0x00000DF7,
+               0x03F, 0x00000DD7,
        0x95000004,     0x00000000,     0x40000000,     0x00000000,
                0x033, 0x00000060,
                0x03F, 0x00000467,
@@ -38921,21 +38921,21 @@ static const u32 rtw8822c_rf_b[] = {
                0x033, 0x00000062,
                0x03F, 0x00000908,
                0x033, 0x00000063,
-               0x03F, 0x00000D09,
+               0x03F, 0x00000CC6,
                0x033, 0x00000064,
-               0x03F, 0x00000D49,
+               0x03F, 0x00000CC9,
                0x033, 0x00000065,
-               0x03F, 0x00000D8A,
+               0x03F, 0x00000CCC,
                0x033, 0x00000066,
-               0x03F, 0x00000DEB,
+               0x03F, 0x00000CCF,
                0x033, 0x00000067,
-               0x03F, 0x00000DEE,
+               0x03F, 0x00000CD2,
                0x033, 0x00000068,
-               0x03F, 0x00000DF1,
+               0x03F, 0x00000CD5,
                0x033, 0x00000069,
-               0x03F, 0x00000DF4,
+               0x03F, 0x00000DD4,
                0x033, 0x0000006A,
-               0x03F, 0x00000DF7,
+               0x03F, 0x00000DD7,
        0x95000005,     0x00000000,     0x40000000,     0x00000000,
                0x033, 0x00000060,
                0x03F, 0x00000467,
@@ -38944,21 +38944,21 @@ static const u32 rtw8822c_rf_b[] = {
                0x033, 0x00000062,
                0x03F, 0x00000908,
                0x033, 0x00000063,
-               0x03F, 0x00000D09,
+               0x03F, 0x00000CC6,
                0x033, 0x00000064,
-               0x03F, 0x00000D49,
+               0x03F, 0x00000CC9,
                0x033, 0x00000065,
-               0x03F, 0x00000D8A,
+               0x03F, 0x00000CCC,
                0x033, 0x00000066,
-               0x03F, 0x00000DEB,
+               0x03F, 0x00000CCF,
                0x033, 0x00000067,
-               0x03F, 0x00000DEE,
+               0x03F, 0x00000CD2,
                0x033, 0x00000068,
-               0x03F, 0x00000DF1,
+               0x03F, 0x00000CD5,
                0x033, 0x00000069,
-               0x03F, 0x00000DF4,
+               0x03F, 0x00000DD4,
                0x033, 0x0000006A,
-               0x03F, 0x00000DF7,
+               0x03F, 0x00000DD7,
        0x95000006,     0x00000000,     0x40000000,     0x00000000,
                0x033, 0x00000060,
                0x03F, 0x00000467,
@@ -38967,21 +38967,21 @@ static const u32 rtw8822c_rf_b[] = {
                0x033, 0x00000062,
                0x03F, 0x00000908,
                0x033, 0x00000063,
-               0x03F, 0x00000D09,
+               0x03F, 0x00000CC6,
                0x033, 0x00000064,
-               0x03F, 0x00000D49,
+               0x03F, 0x00000CC9,
                0x033, 0x00000065,
-               0x03F, 0x00000D8A,
+               0x03F, 0x00000CCC,
                0x033, 0x00000066,
-               0x03F, 0x00000DEB,
+               0x03F, 0x00000CCF,
                0x033, 0x00000067,
-               0x03F, 0x00000DEE,
+               0x03F, 0x00000CD2,
                0x033, 0x00000068,
-               0x03F, 0x00000DF1,
+               0x03F, 0x00000CD5,
                0x033, 0x00000069,
-               0x03F, 0x00000DF4,
+               0x03F, 0x00000DD4,
                0x033, 0x0000006A,
-               0x03F, 0x00000DF7,
+               0x03F, 0x00000DD7,
        0x95000015,     0x00000000,     0x40000000,     0x00000000,
                0x033, 0x00000060,
                0x03F, 0x00000467,
@@ -38990,21 +38990,21 @@ static const u32 rtw8822c_rf_b[] = {
                0x033, 0x00000062,
                0x03F, 0x00000908,
                0x033, 0x00000063,
-               0x03F, 0x00000D09,
+               0x03F, 0x00000CC6,
                0x033, 0x00000064,
-               0x03F, 0x00000D49,
+               0x03F, 0x00000CC9,
                0x033, 0x00000065,
-               0x03F, 0x00000D8A,
+               0x03F, 0x00000CCC,
                0x033, 0x00000066,
-               0x03F, 0x00000DEB,
+               0x03F, 0x00000CCF,
                0x033, 0x00000067,
-               0x03F, 0x00000DEE,
+               0x03F, 0x00000CD2,
                0x033, 0x00000068,
-               0x03F, 0x00000DF1,
+               0x03F, 0x00000CD5,
                0x033, 0x00000069,
-               0x03F, 0x00000DF4,
+               0x03F, 0x00000DD4,
                0x033, 0x0000006A,
-               0x03F, 0x00000DF7,
+               0x03F, 0x00000DD7,
        0x95000016,     0x00000000,     0x40000000,     0x00000000,
                0x033, 0x00000060,
                0x03F, 0x00000467,
@@ -39013,21 +39013,21 @@ static const u32 rtw8822c_rf_b[] = {
                0x033, 0x00000062,
                0x03F, 0x00000908,
                0x033, 0x00000063,
-               0x03F, 0x00000D09,
+               0x03F, 0x00000CC6,
                0x033, 0x00000064,
-               0x03F, 0x00000D49,
+               0x03F, 0x00000CC9,
                0x033, 0x00000065,
-               0x03F, 0x00000D8A,
+               0x03F, 0x00000CCC,
                0x033, 0x00000066,
-               0x03F, 0x00000DEB,
+               0x03F, 0x00000CCF,
                0x033, 0x00000067,
-               0x03F, 0x00000DEE,
+               0x03F, 0x00000CD2,
                0x033, 0x00000068,
-               0x03F, 0x00000DF1,
+               0x03F, 0x00000CD5,
                0x033, 0x00000069,
-               0x03F, 0x00000DF4,
+               0x03F, 0x00000DD4,
                0x033, 0x0000006A,
-               0x03F, 0x00000DF7,
+               0x03F, 0x00000DD7,
        0xA0000000,     0x00000000,
                0x033, 0x00000060,
                0x03F, 0x00000487,
index 13d4ae8..faf603c 100644
@@ -16,7 +16,6 @@ int fwnode_mdiobus_phy_device_register(struct mii_bus *mdio,
 int fwnode_mdiobus_register_phy(struct mii_bus *bus,
                                struct fwnode_handle *child, u32 addr);
 
-int fwnode_mdiobus_register(struct mii_bus *bus, struct fwnode_handle *fwnode);
 #else /* CONFIG_FWNODE_MDIO */
 int fwnode_mdiobus_phy_device_register(struct mii_bus *mdio,
                                       struct phy_device *phy,
@@ -31,17 +30,6 @@ static inline int fwnode_mdiobus_register_phy(struct mii_bus *bus,
 {
        return -EINVAL;
 }
-
-static inline int fwnode_mdiobus_register(struct mii_bus *bus,
-                                         struct fwnode_handle *fwnode)
-{
-       /*
-        * Fall back to mdiobus_register() function to register a bus.
-        * This way, we don't have to keep compat bits around in drivers.
-        */
-
-       return mdiobus_register(mdio);
-}
 #endif
 
 #endif /* __LINUX_FWNODE_MDIO_H */
index c93a2cd..161cdf7 100644
@@ -8169,6 +8169,8 @@ bool cfg80211_iftype_allowed(struct wiphy *wiphy, enum nl80211_iftype iftype,
        dev_notice(&(wiphy)->dev, format, ##args)
 #define wiphy_info(wiphy, format, args...)                     \
        dev_info(&(wiphy)->dev, format, ##args)
+#define wiphy_info_once(wiphy, format, args...)                        \
+       dev_info_once(&(wiphy)->dev, format, ##args)
 
 #define wiphy_err_ratelimited(wiphy, format, args...)          \
        dev_err_ratelimited(&(wiphy)->dev, format, ##args)
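
The new wiphy_info_once() maps straight onto dev_info_once(), so a driver gets a single wiphy-prefixed info line per boot no matter how often the path runs. A minimal sketch of a call site (the condition and message are hypothetical, not from this series):

    if (fw_lacks_feature)   /* hypothetical driver state */
            wiphy_info_once(wiphy, "firmware feature missing, using fallback\n");
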
index 96f9cf8..1f47bef 100644
@@ -159,22 +159,26 @@ unsigned int nf_ct_port_nlattr_tuple_size(void);
 extern const struct nla_policy nf_ct_port_nla_policy[];
 
 #ifdef CONFIG_SYSCTL
-__printf(3, 4) __cold
+__printf(4, 5) __cold
 void nf_ct_l4proto_log_invalid(const struct sk_buff *skb,
                               const struct nf_conn *ct,
+                              const struct nf_hook_state *state,
                               const char *fmt, ...);
-__printf(5, 6) __cold
+__printf(4, 5) __cold
 void nf_l4proto_log_invalid(const struct sk_buff *skb,
-                           struct net *net,
-                           u16 pf, u8 protonum,
+                           const struct nf_hook_state *state,
+                           u8 protonum,
                            const char *fmt, ...);
 #else
-static inline __printf(5, 6) __cold
-void nf_l4proto_log_invalid(const struct sk_buff *skb, struct net *net,
-                           u16 pf, u8 protonum, const char *fmt, ...) {}
-static inline __printf(3, 4) __cold
+static inline __printf(4, 5) __cold
+void nf_l4proto_log_invalid(const struct sk_buff *skb,
+                           const struct nf_hook_state *state,
+                           u8 protonum,
+                           const char *fmt, ...) {}
+static inline __printf(4, 5) __cold
 void nf_ct_l4proto_log_invalid(const struct sk_buff *skb,
                               const struct nf_conn *ct,
+                              const struct nf_hook_state *state,
                               const char *fmt, ...) { }
 #endif /* CONFIG_SYSCTL */
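
Both helpers now take the full nf_hook_state instead of a bare net/pf pair, which lets nf_log_packet() record the in/out devices as well. The dccp_error() hunk further down shows the mechanical conversion; schematically (msg stands for the caller's message):

    /* before */ nf_l4proto_log_invalid(skb, state->net, state->pf, IPPROTO_DCCP, "%s", msg);
    /* after  */ nf_l4proto_log_invalid(skb, state, IPPROTO_DCCP, "%s", msg);
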
 
index 46c8d5b..0fa5a6d 100644
@@ -16,6 +16,7 @@ extern struct nft_expr_type nft_range_type;
 extern struct nft_expr_type nft_meta_type;
 extern struct nft_expr_type nft_rt_type;
 extern struct nft_expr_type nft_exthdr_type;
+extern struct nft_expr_type nft_last_type;
 
 #ifdef CONFIG_NETWORK_SECMARK
 extern struct nft_object_type nft_secmark_obj_type;
index 1e62551..c99ffe9 100644
@@ -37,8 +37,15 @@ enum qdisc_state_t {
        __QDISC_STATE_SCHED,
        __QDISC_STATE_DEACTIVATED,
        __QDISC_STATE_MISSED,
+       __QDISC_STATE_DRAINING,
 };
 
+#define QDISC_STATE_MISSED     BIT(__QDISC_STATE_MISSED)
+#define QDISC_STATE_DRAINING   BIT(__QDISC_STATE_DRAINING)
+
+#define QDISC_STATE_NON_EMPTY  (QDISC_STATE_MISSED | \
+                                       QDISC_STATE_DRAINING)
+
 struct qdisc_size_table {
        struct rcu_head         rcu;
        struct list_head        list;
@@ -110,8 +117,6 @@ struct Qdisc {
        spinlock_t              busylock ____cacheline_aligned_in_smp;
        spinlock_t              seqlock;
 
-       /* for NOLOCK qdisc, true if there are no enqueued skbs */
-       bool                    empty;
        struct rcu_head         rcu;
 
        /* private data */
@@ -145,6 +150,11 @@ static inline bool qdisc_is_running(struct Qdisc *qdisc)
        return (raw_read_seqcount(&qdisc->running) & 1) ? true : false;
 }
 
+static inline bool nolock_qdisc_is_empty(const struct Qdisc *qdisc)
+{
+       return !(READ_ONCE(qdisc->state) & QDISC_STATE_NON_EMPTY);
+}
+
 static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
 {
        return q->flags & TCQ_F_CPUSTATS;
@@ -153,7 +163,7 @@ static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
 static inline bool qdisc_is_empty(const struct Qdisc *qdisc)
 {
        if (qdisc_is_percpu_stats(qdisc))
-               return READ_ONCE(qdisc->empty);
+               return nolock_qdisc_is_empty(qdisc);
        return !READ_ONCE(qdisc->q.qlen);
 }
 
@@ -161,7 +171,7 @@ static inline bool qdisc_run_begin(struct Qdisc *qdisc)
 {
        if (qdisc->flags & TCQ_F_NOLOCK) {
                if (spin_trylock(&qdisc->seqlock))
-                       goto nolock_empty;
+                       return true;
 
                /* If the MISSED flag is set, it means other thread has
                 * set the MISSED flag before second spin_trylock(), so
@@ -183,11 +193,7 @@ static inline bool qdisc_run_begin(struct Qdisc *qdisc)
                /* Retry again in case other CPU may not see the new flag
                 * after it releases the lock at the end of qdisc_run_end().
                 */
-               if (!spin_trylock(&qdisc->seqlock))
-                       return false;
-
-nolock_empty:
-               WRITE_ONCE(qdisc->empty, false);
+               return spin_trylock(&qdisc->seqlock);
        } else if (qdisc_is_running(qdisc)) {
                return false;
        }
@@ -201,15 +207,14 @@ nolock_empty:
 
 static inline void qdisc_run_end(struct Qdisc *qdisc)
 {
-       write_seqcount_end(&qdisc->running);
        if (qdisc->flags & TCQ_F_NOLOCK) {
                spin_unlock(&qdisc->seqlock);
 
                if (unlikely(test_bit(__QDISC_STATE_MISSED,
-                                     &qdisc->state))) {
-                       clear_bit(__QDISC_STATE_MISSED, &qdisc->state);
+                                     &qdisc->state)))
                        __netif_schedule(qdisc);
-               }
+       } else {
+               write_seqcount_end(&qdisc->running);
        }
 }
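
With qdisc->empty removed, a NOLOCK qdisc counts as empty exactly when neither MISSED nor DRAINING is set, so there is no separate flag to keep in sync with enqueue. A standalone sketch of the mask logic, with bit positions taken from the enum above (MISSED is bit 2, the new DRAINING bit 3):

    #include <stdio.h>

    #define QDISC_STATE_MISSED      (1UL << 2)
    #define QDISC_STATE_DRAINING    (1UL << 3)
    #define QDISC_STATE_NON_EMPTY   (QDISC_STATE_MISSED | QDISC_STATE_DRAINING)

    /* mirrors nolock_qdisc_is_empty() on a plain value */
    static int is_empty(unsigned long state)
    {
            return !(state & QDISC_STATE_NON_EMPTY);
    }

    int main(void)
    {
            printf("%d\n", is_empty(0));                    /* 1: nothing queued */
            printf("%d\n", is_empty(QDISC_STATE_DRAINING)); /* 0: requeue in flight */
            return 0;
    }
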
 
index 9eaa701..c4a4c17 100644
@@ -987,7 +987,8 @@ struct sctp_transport {
                __u16 pmtu;
                __u16 probe_size;
                __u16 probe_high;
-               __u8 probe_count;
+               __u8 probe_count:3;
+               __u8 raise_count:5;
                __u8 state;
        } pl; /* plpmtud related */
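
Narrowing probe_count to 3 bits makes room for the new raise_count in the same byte, so struct sctp_transport does not grow; the trade-off is that the counters can only reach 7 and 31 before wrapping. A standalone illustration of those widths (field names copied from the hunk, struct name and values made up):

    #include <stdio.h>

    struct pl_bits {
            unsigned char probe_count:3;    /* 0..7 */
            unsigned char raise_count:5;    /* 0..31 */
    };

    int main(void)
    {
            struct pl_bits c = { .probe_count = 7, .raise_count = 31 };

            c.probe_count++;        /* wraps to 0 in a 3-bit field */
            c.raise_count++;        /* wraps to 0 in a 5-bit field */
            printf("%u %u\n", c.probe_count, c.raise_count);        /* 0 0 */
            return 0;
    }
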
 
index 8341a8d..8d398a5 100644
@@ -79,8 +79,6 @@
        __SNMP_INC_STATS((net)->mib.tls_statistics, field)
 #define TLS_INC_STATS(net, field)                              \
        SNMP_INC_STATS((net)->mib.tls_statistics, field)
-#define __TLS_DEC_STATS(net, field)                            \
-       __SNMP_DEC_STATS((net)->mib.tls_statistics, field)
 #define TLS_DEC_STATS(net, field)                              \
        SNMP_DEC_STATS((net)->mib.tls_statistics, field)
 
index c58a6d4..1d803e8 100644
@@ -1024,6 +1024,7 @@ struct xfrm_offload {
 #define CRYPTO_INVALID_PROTOCOL                        128
 
        __u8                    proto;
+       __u8                    inner_ipproto;
 };
 
 struct sec_path {
index 4dcd13d..d588c24 100644
 #define SO_PREFER_BUSY_POLL    69
 #define SO_BUSY_POLL_BUDGET    70
 
+#define SO_NETNS_COOKIE                71
+
 #if !defined(__KERNEL__)
 
 #if __BITS_PER_LONG == 64 || (defined(__x86_64__) && defined(__ILP32__))
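
SO_NETNS_COOKIE is a read-only option returning the owning network namespace's cookie; per the sock.c hunk below, optlen must be exactly sizeof(u64) or the call fails with EINVAL. A minimal userspace sketch, assuming a libc that does not carry the define yet:

    #include <stdint.h>
    #include <stdio.h>
    #include <sys/socket.h>

    #ifndef SO_NETNS_COOKIE
    #define SO_NETNS_COOKIE 71
    #endif

    int main(void)
    {
            uint64_t cookie;
            socklen_t len = sizeof(cookie); /* must be exactly 8 bytes */
            int fd = socket(AF_INET, SOCK_DGRAM, 0);

            if (fd < 0 || getsockopt(fd, SOL_SOCKET, SO_NETNS_COOKIE, &cookie, &len))
                    return 1;
            printf("netns cookie: %llu\n", (unsigned long long)cookie);
            return 0;
    }
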
index 19715e2..e94d1fa 100644
@@ -1195,6 +1195,21 @@ enum nft_counter_attributes {
 };
 #define NFTA_COUNTER_MAX       (__NFTA_COUNTER_MAX - 1)
 
+/**
+ * enum nft_last_attributes - nf_tables last expression netlink attributes
+ *
+ * @NFTA_LAST_SET: last update has been set, zero means never updated (NLA_U32)
+ * @NFTA_LAST_MSECS: milliseconds since last update (NLA_U64)
+ */
+enum nft_last_attributes {
+       NFTA_LAST_UNSPEC,
+       NFTA_LAST_SET,
+       NFTA_LAST_MSECS,
+       NFTA_LAST_PAD,
+       __NFTA_LAST_MAX
+};
+#define NFTA_LAST_MAX  (__NFTA_LAST_MAX - 1)
+
 /**
  * enum nft_log_attributes - nf_tables log expression netlink attributes
  *
index 26fc60c..904909d 100644
@@ -290,6 +290,8 @@ enum
        LINUX_MIB_TCPDUPLICATEDATAREHASH,       /* TCPDuplicateDataRehash */
        LINUX_MIB_TCPDSACKRECVSEGS,             /* TCPDSACKRecvSegs */
        LINUX_MIB_TCPDSACKIGNOREDDUBIOUS,       /* TCPDSACKIgnoredDubious */
+       LINUX_MIB_TCPMIGRATEREQSUCCESS,         /* TCPMigrateReqSuccess */
+       LINUX_MIB_TCPMIGRATEREQFAILURE,         /* TCPMigrateReqFailure */
        __LINUX_MIB_MAX
 };
 
index 50531a2..991d09b 100644
@@ -3852,10 +3852,33 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
        qdisc_calculate_pkt_len(skb, q);
 
        if (q->flags & TCQ_F_NOLOCK) {
+               if (q->flags & TCQ_F_CAN_BYPASS && nolock_qdisc_is_empty(q) &&
+                   qdisc_run_begin(q)) {
+                       /* Retest nolock_qdisc_is_empty() within the protection
+                        * of q->seqlock to protect from racing with requeuing.
+                        */
+                       if (unlikely(!nolock_qdisc_is_empty(q))) {
+                               rc = q->enqueue(skb, q, &to_free) &
+                                       NET_XMIT_MASK;
+                               __qdisc_run(q);
+                               qdisc_run_end(q);
+
+                               goto no_lock_out;
+                       }
+
+                       qdisc_bstats_cpu_update(q, skb);
+                       if (sch_direct_xmit(skb, q, dev, txq, NULL, true) &&
+                           !nolock_qdisc_is_empty(q))
+                               __qdisc_run(q);
+
+                       qdisc_run_end(q);
+                       return NET_XMIT_SUCCESS;
+               }
+
                rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
-               if (likely(!netif_xmit_frozen_or_stopped(txq)))
-                       qdisc_run(q);
+               qdisc_run(q);
 
+no_lock_out:
                if (unlikely(to_free))
                        kfree_skb_list(to_free);
                return rc;
index 566ddd1..8fdd04f 100644
@@ -2709,23 +2709,16 @@ static int devlink_rate_nodes_check(struct devlink *devlink, u16 mode,
                                    struct netlink_ext_ack *extack)
 {
        struct devlink_rate *devlink_rate;
-       u16 old_mode;
-       int err;
-
-       if (!devlink->ops->eswitch_mode_get)
-               return -EOPNOTSUPP;
-       err = devlink->ops->eswitch_mode_get(devlink, &old_mode);
-       if (err)
-               return err;
-
-       if (old_mode == mode)
-               return 0;
 
+       /* Take the lock to sync with devlink_rate_nodes_destroy() */
+       mutex_lock(&devlink->lock);
        list_for_each_entry(devlink_rate, &devlink->rate_list, list)
                if (devlink_rate_is_node(devlink_rate)) {
+                       mutex_unlock(&devlink->lock);
                        NL_SET_ERR_MSG_MOD(extack, "Rate node(s) exists.");
                        return -EBUSY;
                }
+       mutex_unlock(&devlink->lock);
        return 0;
 }
 
@@ -9275,6 +9268,8 @@ void devlink_rate_leaf_destroy(struct devlink_port *devlink_port)
 
        mutex_lock(&devlink->lock);
        devlink_rate_notify(devlink_rate, DEVLINK_CMD_RATE_DEL);
+       if (devlink_rate->parent)
+               refcount_dec(&devlink_rate->parent->refcnt);
        list_del(&devlink_rate->list);
        devlink_port->devlink_rate = NULL;
        mutex_unlock(&devlink->lock);
index ddfa880..a2337b3 100644
@@ -1635,6 +1635,13 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
                v.val = sk->sk_bound_dev_if;
                break;
 
+       case SO_NETNS_COOKIE:
+               lv = sizeof(u64);
+               if (len != lv)
+                       return -EINVAL;
+               v.val64 = sock_net(sk)->net_cookie;
+               break;
+
        default:
                /* We implement the SO_SNDLOWAT etc to not be settable
                 * (1003.1g 7).
index de5ee3a..3f00a28 100644
@@ -6,6 +6,7 @@
  * selecting the socket index from the array of available sockets.
  */
 
+#include <net/ip.h>
 #include <net/sock_reuseport.h>
 #include <linux/bpf.h>
 #include <linux/idr.h>
@@ -536,7 +537,7 @@ struct sock *reuseport_migrate_sock(struct sock *sk,
 
        socks = READ_ONCE(reuse->num_socks);
        if (unlikely(!socks))
-               goto out;
+               goto failure;
 
        /* paired with smp_wmb() in __reuseport_add_sock() */
        smp_rmb();
@@ -546,13 +547,13 @@ struct sock *reuseport_migrate_sock(struct sock *sk,
        if (!prog || prog->expected_attach_type != BPF_SK_REUSEPORT_SELECT_OR_MIGRATE) {
                if (sock_net(sk)->ipv4.sysctl_tcp_migrate_req)
                        goto select_by_hash;
-               goto out;
+               goto failure;
        }
 
        if (!skb) {
                skb = alloc_skb(0, GFP_ATOMIC);
                if (!skb)
-                       goto out;
+                       goto failure;
                allocated = true;
        }
 
@@ -565,12 +566,18 @@ select_by_hash:
        if (!nsk)
                nsk = reuseport_select_sock_by_hash(reuse, hash, socks);
 
-       if (IS_ERR_OR_NULL(nsk) || unlikely(!refcount_inc_not_zero(&nsk->sk_refcnt)))
+       if (IS_ERR_OR_NULL(nsk) || unlikely(!refcount_inc_not_zero(&nsk->sk_refcnt))) {
                nsk = NULL;
+               goto failure;
+       }
 
 out:
        rcu_read_unlock();
        return nsk;
+
+failure:
+       __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMIGRATEREQFAILURE);
+       goto out;
 }
 EXPORT_SYMBOL(reuseport_migrate_sock);
 
index 0eea878..754013f 100644
@@ -703,6 +703,8 @@ static struct request_sock *inet_reqsk_clone(struct request_sock *req,
 
        nreq = kmem_cache_alloc(req->rsk_ops->slab, GFP_ATOMIC | __GFP_NOWARN);
        if (!nreq) {
+               __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMIGRATEREQFAILURE);
+
                /* paired with refcount_inc_not_zero() in reuseport_migrate_sock() */
                sock_put(sk);
                return NULL;
@@ -876,9 +878,10 @@ static void reqsk_timer_handler(struct timer_list *t)
                if (!inet_ehash_insert(req_to_sk(nreq), req_to_sk(oreq), NULL)) {
                        /* delete timer */
                        inet_csk_reqsk_queue_drop(sk_listener, nreq);
-                       goto drop;
+                       goto no_ownership;
                }
 
+               __NET_INC_STATS(net, LINUX_MIB_TCPMIGRATEREQSUCCESS);
                reqsk_migrate_reset(oreq);
                reqsk_queue_removed(&inet_csk(oreq->rsk_listener)->icsk_accept_queue, oreq);
                reqsk_put(oreq);
@@ -887,17 +890,19 @@ static void reqsk_timer_handler(struct timer_list *t)
                return;
        }
 
-drop:
        /* Even if we can clone the req, we may need not retransmit any more
         * SYN+ACKs (nreq->num_timeout > max_syn_ack_retries, etc), or another
         * CPU may win the "own_req" race so that inet_ehash_insert() fails.
         */
        if (nreq) {
+               __NET_INC_STATS(net, LINUX_MIB_TCPMIGRATEREQFAILURE);
+no_ownership:
                reqsk_migrate_reset(nreq);
                reqsk_queue_removed(queue, nreq);
                __reqsk_free(nreq);
        }
 
+drop:
        inet_csk_reqsk_queue_drop_and_put(oreq->rsk_listener, oreq);
 }
 
@@ -1135,11 +1140,13 @@ struct sock *inet_csk_complete_hashdance(struct sock *sk, struct sock *child,
 
                        refcount_set(&nreq->rsk_refcnt, 1);
                        if (inet_csk_reqsk_queue_add(sk, nreq, child)) {
+                               __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMIGRATEREQSUCCESS);
                                reqsk_migrate_reset(req);
                                reqsk_put(req);
                                return child;
                        }
 
+                       __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMIGRATEREQFAILURE);
                        reqsk_migrate_reset(nreq);
                        __reqsk_free(nreq);
                } else if (inet_csk_reqsk_queue_add(sk, req, child)) {
@@ -1188,8 +1195,12 @@ void inet_csk_listen_stop(struct sock *sk)
                                refcount_set(&nreq->rsk_refcnt, 1);
 
                                if (inet_csk_reqsk_queue_add(nsk, nreq, child)) {
+                                       __NET_INC_STATS(sock_net(nsk),
+                                                       LINUX_MIB_TCPMIGRATEREQSUCCESS);
                                        reqsk_migrate_reset(req);
                                } else {
+                                       __NET_INC_STATS(sock_net(nsk),
+                                                       LINUX_MIB_TCPMIGRATEREQFAILURE);
                                        reqsk_migrate_reset(nreq);
                                        __reqsk_free(nreq);
                                }
index a68bf4c..12dca0c 100644
@@ -107,6 +107,8 @@ module_param(log_ecn_error, bool, 0644);
 MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
 
 static struct rtnl_link_ops ipgre_link_ops __read_mostly;
+static const struct header_ops ipgre_header_ops;
+
 static int ipgre_tunnel_init(struct net_device *dev);
 static void erspan_build_header(struct sk_buff *skb,
                                u32 id, u32 index,
@@ -364,7 +366,10 @@ static int __ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
                                           raw_proto, false) < 0)
                        goto drop;
 
-               if (tunnel->dev->type != ARPHRD_NONE)
+               /* Special case for ipgre_header_parse(), which expects the
+                * mac_header to point to the outer IP header.
+                */
+               if (tunnel->dev->header_ops == &ipgre_header_ops)
                        skb_pop_mac_header(skb);
                else
                        skb_reset_mac_header(skb);
index c3efc7d..8d8a8da 100644
@@ -1054,7 +1054,7 @@ static int __ip_append_data(struct sock *sk,
                        unsigned int datalen;
                        unsigned int fraglen;
                        unsigned int fraggap;
-                       unsigned int alloclen;
+                       unsigned int alloclen, alloc_extra;
                        unsigned int pagedlen;
                        struct sk_buff *skb_prev;
 alloc_new_skb:
@@ -1074,35 +1074,39 @@ alloc_new_skb:
                        fraglen = datalen + fragheaderlen;
                        pagedlen = 0;
 
+                       alloc_extra = hh_len + 15;
+                       alloc_extra += exthdrlen;
+
+                       /* The last fragment gets additional space at tail.
+                        * Note, with MSG_MORE we overallocate on fragments,
+                        * because we have no idea what fragment will be
+                        * the last.
+                        */
+                       if (datalen == length + fraggap)
+                               alloc_extra += rt->dst.trailer_len;
+
                        if ((flags & MSG_MORE) &&
                            !(rt->dst.dev->features&NETIF_F_SG))
                                alloclen = mtu;
-                       else if (!paged)
+                       else if (!paged &&
+                                (fraglen + alloc_extra < SKB_MAX_ALLOC ||
+                                 !(rt->dst.dev->features & NETIF_F_SG)))
                                alloclen = fraglen;
                        else {
                                alloclen = min_t(int, fraglen, MAX_HEADER);
                                pagedlen = fraglen - alloclen;
                        }
 
-                       alloclen += exthdrlen;
-
-                       /* The last fragment gets additional space at tail.
-                        * Note, with MSG_MORE we overallocate on fragments,
-                        * because we have no idea what fragment will be
-                        * the last.
-                        */
-                       if (datalen == length + fraggap)
-                               alloclen += rt->dst.trailer_len;
+                       alloclen += alloc_extra;
 
                        if (transhdrlen) {
-                               skb = sock_alloc_send_skb(sk,
-                                               alloclen + hh_len + 15,
+                               skb = sock_alloc_send_skb(sk, alloclen,
                                                (flags & MSG_DONTWAIT), &err);
                        } else {
                                skb = NULL;
                                if (refcount_read(&sk->sk_wmem_alloc) + wmem_alloc_delta <=
                                    2 * sk->sk_sndbuf)
-                                       skb = alloc_skb(alloclen + hh_len + 15,
+                                       skb = alloc_skb(alloclen,
                                                        sk->sk_allocation);
                                if (unlikely(!skb))
                                        err = -ENOBUFS;
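
The refactor pulls every fixed overhead (hh_len + 15 for headroom and alignment, exthdrlen, and the trailer on the last fragment) into alloc_extra before the linear-vs-paged decision, so the new `fraglen + alloc_extra < SKB_MAX_ALLOC` test sees the true allocation size and routes oversized frames onto the paged path whenever the device supports SG, instead of forcing a high-order linear allocation. A toy version of the decision under assumed numbers (SKB_MAX_ALLOC is arch/config dependent, roughly 16 KB here); the ip6_output.c hunk below applies the same scheme:

    #include <stdio.h>

    #define SKB_MAX_ALLOC 16064U    /* assumed value for illustration */

    int main(void)
    {
            unsigned int hh_len = 16, exthdrlen = 0, trailer_len = 0;
            unsigned int alloc_extra = hh_len + 15 + exthdrlen + trailer_len;
            unsigned int fraglen = 16050;   /* hypothetical large fragment */

            /* stay linear only if the whole allocation fits */
            printf("linear: %d\n", fraglen + alloc_extra < SKB_MAX_ALLOC);  /* 0 */
            return 0;
    }
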
index d5bfa08..266c655 100644
@@ -242,6 +242,8 @@ static int ipip_tunnel_rcv(struct sk_buff *skb, u8 ipproto)
                        if (!tun_dst)
                                return 0;
                }
+               skb_reset_mac_header(skb);
+
                return ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
        }
 
index 6d46297..b0d3a09 100644
@@ -295,6 +295,8 @@ static const struct snmp_mib snmp4_net_list[] = {
        SNMP_MIB_ITEM("TcpDuplicateDataRehash", LINUX_MIB_TCPDUPLICATEDATAREHASH),
        SNMP_MIB_ITEM("TCPDSACKRecvSegs", LINUX_MIB_TCPDSACKRECVSEGS),
        SNMP_MIB_ITEM("TCPDSACKIgnoredDubious", LINUX_MIB_TCPDSACKIGNOREDDUBIOUS),
+       SNMP_MIB_ITEM("TCPMigrateReqSuccess", LINUX_MIB_TCPMIGRATEREQSUCCESS),
+       SNMP_MIB_ITEM("TCPMigrateReqFailure", LINUX_MIB_TCPMIGRATEREQFAILURE),
        SNMP_MIB_SENTINEL
 };
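
Once registered here, the two counters show up in /proc/net/netstat on the TcpExt name/value line pair. A rough userspace sketch that simply dumps both TcpExt lines for manual inspection:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            char line[4096];
            FILE *f = fopen("/proc/net/netstat", "r");

            if (!f)
                    return 1;
            /* TcpExt appears twice: a header line carrying the names
             * (including TCPMigrateReqSuccess/TCPMigrateReqFailure) and
             * a second line with the values in the same order.
             */
            while (fgets(line, sizeof(line), f))
                    if (!strncmp(line, "TcpExt:", 7))
                            fputs(line, stdout);
            fclose(f);
            return 0;
    }
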
 
index f258a4c..0a4f3f1 100644
@@ -786,6 +786,9 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
        return inet_csk_complete_hashdance(sk, child, req, own_req);
 
 listen_overflow:
+       if (sk != req->rsk_listener)
+               __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMIGRATEREQFAILURE);
+
        if (!sock_net(sk)->ipv4.sysctl_tcp_abort_on_overflow) {
                inet_rsk(req)->acked = 1;
                return NULL;
index ff4f9eb..984050f 100644
@@ -1055,13 +1055,11 @@ static int ip6_dst_lookup_tail(struct net *net, const struct sock *sk,
         * ip6_route_output will fail given src=any saddr, though, so
         * that's why we try it again later.
         */
-       if (ipv6_addr_any(&fl6->saddr) && (!*dst || !(*dst)->error)) {
+       if (ipv6_addr_any(&fl6->saddr)) {
                struct fib6_info *from;
                struct rt6_info *rt;
-               bool had_dst = *dst != NULL;
 
-               if (!had_dst)
-                       *dst = ip6_route_output(net, sk, fl6);
+               *dst = ip6_route_output(net, sk, fl6);
                rt = (*dst)->error ? NULL : (struct rt6_info *)*dst;
 
                rcu_read_lock();
@@ -1078,7 +1076,7 @@ static int ip6_dst_lookup_tail(struct net *net, const struct sock *sk,
                 * never existed and let the SA-enabled version take
                 * over.
                 */
-               if (!had_dst && (*dst)->error) {
+               if ((*dst)->error) {
                        dst_release(*dst);
                        *dst = NULL;
                }
@@ -1555,7 +1553,7 @@ emsgsize:
                        unsigned int datalen;
                        unsigned int fraglen;
                        unsigned int fraggap;
-                       unsigned int alloclen;
+                       unsigned int alloclen, alloc_extra;
                        unsigned int pagedlen;
 alloc_new_skb:
                        /* There's no room in the current skb */
@@ -1582,17 +1580,28 @@ alloc_new_skb:
                        fraglen = datalen + fragheaderlen;
                        pagedlen = 0;
 
+                       alloc_extra = hh_len;
+                       alloc_extra += dst_exthdrlen;
+                       alloc_extra += rt->dst.trailer_len;
+
+                       /* We just reserve space for fragment header.
+                        * Note: this may be overallocation if the message
+                        * (without MSG_MORE) fits into the MTU.
+                        */
+                       alloc_extra += sizeof(struct frag_hdr);
+
                        if ((flags & MSG_MORE) &&
                            !(rt->dst.dev->features&NETIF_F_SG))
                                alloclen = mtu;
-                       else if (!paged)
+                       else if (!paged &&
+                                (fraglen + alloc_extra < SKB_MAX_ALLOC ||
+                                 !(rt->dst.dev->features & NETIF_F_SG)))
                                alloclen = fraglen;
                        else {
                                alloclen = min_t(int, fraglen, MAX_HEADER);
                                pagedlen = fraglen - alloclen;
                        }
-
-                       alloclen += dst_exthdrlen;
+                       alloclen += alloc_extra;
 
                        if (datalen != length + fraggap) {
                                /*
@@ -1602,30 +1611,21 @@ alloc_new_skb:
                                datalen += rt->dst.trailer_len;
                        }
 
-                       alloclen += rt->dst.trailer_len;
                        fraglen = datalen + fragheaderlen;
 
-                       /*
-                        * We just reserve space for fragment header.
-                        * Note: this may be overallocation if the message
-                        * (without MSG_MORE) fits into the MTU.
-                        */
-                       alloclen += sizeof(struct frag_hdr);
-
                        copy = datalen - transhdrlen - fraggap - pagedlen;
                        if (copy < 0) {
                                err = -EINVAL;
                                goto error;
                        }
                        if (transhdrlen) {
-                               skb = sock_alloc_send_skb(sk,
-                                               alloclen + hh_len,
+                               skb = sock_alloc_send_skb(sk, alloclen,
                                                (flags & MSG_DONTWAIT), &err);
                        } else {
                                skb = NULL;
                                if (refcount_read(&sk->sk_wmem_alloc) + wmem_alloc_delta <=
                                    2 * sk->sk_sndbuf)
-                                       skb = alloc_skb(alloclen + hh_len,
+                                       skb = alloc_skb(alloclen,
                                                        sk->sk_allocation);
                                if (unlikely(!skb))
                                        err = -ENOBUFS;
index 288bafd..0b8a386 100644
@@ -837,6 +837,7 @@ static int __ip6_tnl_rcv(struct ip6_tnl *tunnel, struct sk_buff *skb,
                skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
        } else {
                skb->dev = tunnel->dev;
+               skb_reset_mac_header(skb);
        }
 
        skb_reset_network_header(skb);
index e0a39b0..df5bea8 100644
@@ -710,6 +710,8 @@ static int ipip6_rcv(struct sk_buff *skb)
                 * old iph is no longer valid
                 */
                iph = (const struct iphdr *)skb_mac_header(skb);
+               skb_reset_mac_header(skb);
+
                err = IP_ECN_decapsulate(iph, skb);
                if (unlikely(err)) {
                        if (log_ecn_error)
@@ -780,6 +782,8 @@ static int sit_tunnel_rcv(struct sk_buff *skb, u8 ipproto)
                        tpi = &ipip_tpi;
                if (iptunnel_pull_header(skb, 0, tpi->proto, false))
                        goto drop;
+               skb_reset_mac_header(skb);
+
                return ip_tunnel_rcv(tunnel, skb, tpi, NULL, log_ecn_error);
        }
 
index ce0c45d..7bb8242 100644
@@ -455,7 +455,7 @@ static void mptcp_subflow_cleanup_rbuf(struct sock *ssk)
 static bool mptcp_subflow_could_cleanup(const struct sock *ssk, bool rx_empty)
 {
        const struct inet_connection_sock *icsk = inet_csk(ssk);
-       bool ack_pending = READ_ONCE(icsk->icsk_ack.pending);
+       u8 ack_pending = READ_ONCE(icsk->icsk_ack.pending);
        const struct tcp_sock *tp = tcp_sk(ssk);
 
        return (ack_pending & ICSK_ACK_SCHED) &&
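
The bool-to-u8 change is a real fix: icsk_ack.pending is a bitmask, and assigning it to bool collapses any nonzero value to 1, so a pending ACK timer alone would have satisfied the ICSK_ACK_SCHED test just above. A standalone demonstration, assuming the kernel's flag values (ICSK_ACK_SCHED = 1, ICSK_ACK_TIMER = 2):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define ICSK_ACK_SCHED  1       /* assumed kernel values */
    #define ICSK_ACK_TIMER  2

    int main(void)
    {
            uint8_t pending = ICSK_ACK_TIMER;       /* SCHED bit clear */

            bool as_bool = pending;         /* any nonzero becomes 1 */
            uint8_t as_u8 = pending;        /* bits preserved */

            printf("bool: %d\n", as_bool & ICSK_ACK_SCHED); /* 1 - false positive */
            printf("u8:   %d\n", as_u8 & ICSK_ACK_SCHED);   /* 0 - correct */
            return 0;
    }
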
index 87112da..049890e 100644
@@ -74,7 +74,7 @@ obj-$(CONFIG_NF_DUP_NETDEV)   += nf_dup_netdev.o
 nf_tables-objs := nf_tables_core.o nf_tables_api.o nft_chain_filter.o \
                  nf_tables_trace.o nft_immediate.o nft_cmp.o nft_range.o \
                  nft_bitwise.o nft_byteorder.o nft_payload.o nft_lookup.o \
-                 nft_dynset.o nft_meta.o nft_rt.o nft_exthdr.o \
+                 nft_dynset.o nft_meta.o nft_rt.o nft_exthdr.o nft_last.o \
                  nft_chain_route.o nf_tables_offload.o \
                  nft_set_hash.o nft_set_bitmap.o nft_set_rbtree.o \
                  nft_set_pipapo.o
index be14e0b..5564740 100644
 static DEFINE_MUTEX(nf_ct_proto_mutex);
 
 #ifdef CONFIG_SYSCTL
-__printf(5, 6)
+__printf(4, 5)
 void nf_l4proto_log_invalid(const struct sk_buff *skb,
-                           struct net *net,
-                           u16 pf, u8 protonum,
+                           const struct nf_hook_state *state,
+                           u8 protonum,
                            const char *fmt, ...)
 {
+       struct net *net = state->net;
        struct va_format vaf;
        va_list args;
 
@@ -62,15 +63,16 @@ void nf_l4proto_log_invalid(const struct sk_buff *skb,
        vaf.fmt = fmt;
        vaf.va = &args;
 
-       nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL,
-                     "nf_ct_proto_%d: %pV ", protonum, &vaf);
+       nf_log_packet(net, state->pf, 0, skb, state->in, state->out,
+                     NULL, "nf_ct_proto_%d: %pV ", protonum, &vaf);
        va_end(args);
 }
 EXPORT_SYMBOL_GPL(nf_l4proto_log_invalid);
 
-__printf(3, 4)
+__printf(4, 5)
 void nf_ct_l4proto_log_invalid(const struct sk_buff *skb,
                               const struct nf_conn *ct,
+                              const struct nf_hook_state *state,
                               const char *fmt, ...)
 {
        struct va_format vaf;
@@ -85,7 +87,7 @@ void nf_ct_l4proto_log_invalid(const struct sk_buff *skb,
        vaf.fmt = fmt;
        vaf.va = &args;
 
-       nf_l4proto_log_invalid(skb, net, nf_ct_l3num(ct),
+       nf_l4proto_log_invalid(skb, state,
                               nf_ct_protonum(ct), "%pV", &vaf);
        va_end(args);
 }
index 4f33307..c1557d4 100644
@@ -382,7 +382,8 @@ dccp_state_table[CT_DCCP_ROLE_MAX + 1][DCCP_PKT_SYNCACK + 1][CT_DCCP_MAX + 1] =
 
 static noinline bool
 dccp_new(struct nf_conn *ct, const struct sk_buff *skb,
-        const struct dccp_hdr *dh)
+        const struct dccp_hdr *dh,
+        const struct nf_hook_state *hook_state)
 {
        struct net *net = nf_ct_net(ct);
        struct nf_dccp_net *dn;
@@ -414,7 +415,7 @@ dccp_new(struct nf_conn *ct, const struct sk_buff *skb,
        return true;
 
 out_invalid:
-       nf_ct_l4proto_log_invalid(skb, ct, "%s", msg);
+       nf_ct_l4proto_log_invalid(skb, ct, hook_state, "%s", msg);
        return false;
 }
 
@@ -464,8 +465,7 @@ static bool dccp_error(const struct dccp_hdr *dh,
        }
        return false;
 out_invalid:
-       nf_l4proto_log_invalid(skb, state->net, state->pf,
-                              IPPROTO_DCCP, "%s", msg);
+       nf_l4proto_log_invalid(skb, state, IPPROTO_DCCP, "%s", msg);
        return true;
 }
 
@@ -488,7 +488,7 @@ int nf_conntrack_dccp_packet(struct nf_conn *ct, struct sk_buff *skb,
                return -NF_ACCEPT;
 
        type = dh->dccph_type;
-       if (!nf_ct_is_confirmed(ct) && !dccp_new(ct, skb, dh))
+       if (!nf_ct_is_confirmed(ct) && !dccp_new(ct, skb, dh, state))
                return -NF_ACCEPT;
 
        if (type == DCCP_PKT_RESET &&
@@ -543,11 +543,11 @@ int nf_conntrack_dccp_packet(struct nf_conn *ct, struct sk_buff *skb,
                ct->proto.dccp.last_pkt = type;
 
                spin_unlock_bh(&ct->lock);
-               nf_ct_l4proto_log_invalid(skb, ct, "%s", "invalid packet");
+               nf_ct_l4proto_log_invalid(skb, ct, state, "%s", "invalid packet");
                return NF_ACCEPT;
        case CT_DCCP_INVALID:
                spin_unlock_bh(&ct->lock);
-               nf_ct_l4proto_log_invalid(skb, ct, "%s", "invalid state transition");
+               nf_ct_l4proto_log_invalid(skb, ct, state, "%s", "invalid state transition");
                return -NF_ACCEPT;
        }
 
index 4efd874..b38b716 100644 (file)
@@ -170,12 +170,12 @@ int nf_conntrack_inet_error(struct nf_conn *tmpl, struct sk_buff *skb,
        ct_daddr = &ct->tuplehash[dir].tuple.dst.u3;
        if (!nf_inet_addr_cmp(outer_daddr, ct_daddr)) {
                if (state->pf == AF_INET) {
-                       nf_l4proto_log_invalid(skb, state->net, state->pf,
+                       nf_l4proto_log_invalid(skb, state,
                                               l4proto,
                                               "outer daddr %pI4 != inner %pI4",
                                               &outer_daddr->ip, &ct_daddr->ip);
                } else if (state->pf == AF_INET6) {
-                       nf_l4proto_log_invalid(skb, state->net, state->pf,
+                       nf_l4proto_log_invalid(skb, state,
                                               l4proto,
                                               "outer daddr %pI6 != inner %pI6",
                                               &outer_daddr->ip6, &ct_daddr->ip6);
@@ -197,8 +197,7 @@ static void icmp_error_log(const struct sk_buff *skb,
                           const struct nf_hook_state *state,
                           const char *msg)
 {
-       nf_l4proto_log_invalid(skb, state->net, state->pf,
-                              IPPROTO_ICMP, "%s", msg);
+       nf_l4proto_log_invalid(skb, state, IPPROTO_ICMP, "%s", msg);
 }
 
 /* Small and modified version of icmp_rcv */
index facd8c6..61e3b05 100644 (file)
@@ -126,8 +126,7 @@ static void icmpv6_error_log(const struct sk_buff *skb,
                             const struct nf_hook_state *state,
                             const char *msg)
 {
-       nf_l4proto_log_invalid(skb, state->net, state->pf,
-                              IPPROTO_ICMPV6, "%s", msg);
+       nf_l4proto_log_invalid(skb, state, IPPROTO_ICMPV6, "%s", msg);
 }
 
 int nf_conntrack_icmpv6_error(struct nf_conn *tmpl,
index fb8dc02..2394238 100644 (file)
@@ -351,7 +351,7 @@ static bool sctp_error(struct sk_buff *skb,
        }
        return false;
 out_invalid:
-       nf_l4proto_log_invalid(skb, state->net, state->pf, IPPROTO_SCTP, "%s", logmsg);
+       nf_l4proto_log_invalid(skb, state, IPPROTO_SCTP, "%s", logmsg);
        return true;
 }
 
index de840fc..f7e8baf 100644 (file)
@@ -446,14 +446,15 @@ static void tcp_sack(const struct sk_buff *skb, unsigned int dataoff,
        }
 }
 
-static bool tcp_in_window(const struct nf_conn *ct,
-                         struct ip_ct_tcp *state,
+static bool tcp_in_window(struct nf_conn *ct,
                          enum ip_conntrack_dir dir,
                          unsigned int index,
                          const struct sk_buff *skb,
                          unsigned int dataoff,
-                         const struct tcphdr *tcph)
+                         const struct tcphdr *tcph,
+                         const struct nf_hook_state *hook_state)
 {
+       struct ip_ct_tcp *state = &ct->proto.tcp;
        struct net *net = nf_ct_net(ct);
        struct nf_tcp_net *tn = nf_tcp_pernet(net);
        struct ip_ct_tcp_state *sender = &state->seen[dir];
@@ -670,7 +671,7 @@ static bool tcp_in_window(const struct nf_conn *ct,
                    tn->tcp_be_liberal)
                        res = true;
                if (!res) {
-                       nf_ct_l4proto_log_invalid(skb, ct,
+                       nf_ct_l4proto_log_invalid(skb, ct, hook_state,
                        "%s",
                        before(seq, sender->td_maxend + 1) ?
                        in_recv_win ?
@@ -710,7 +711,7 @@ static void tcp_error_log(const struct sk_buff *skb,
                          const struct nf_hook_state *state,
                          const char *msg)
 {
-       nf_l4proto_log_invalid(skb, state->net, state->pf, IPPROTO_TCP, "%s", msg);
+       nf_l4proto_log_invalid(skb, state, IPPROTO_TCP, "%s", msg);
 }
 
 /* Protect conntrack against broken packets. Code taken from ipt_unclean.c.  */
@@ -970,7 +971,7 @@ int nf_conntrack_tcp_packet(struct nf_conn *ct,
                                        IP_CT_EXP_CHALLENGE_ACK;
                }
                spin_unlock_bh(&ct->lock);
-               nf_ct_l4proto_log_invalid(skb, ct,
+               nf_ct_l4proto_log_invalid(skb, ct, state,
                                          "packet (index %d) in dir %d ignored, state %s",
                                          index, dir,
                                          tcp_conntrack_names[old_state]);
@@ -995,7 +996,7 @@ int nf_conntrack_tcp_packet(struct nf_conn *ct,
                pr_debug("nf_ct_tcp: Invalid dir=%i index=%u ostate=%u\n",
                         dir, get_conntrack_index(th), old_state);
                spin_unlock_bh(&ct->lock);
-               nf_ct_l4proto_log_invalid(skb, ct, "invalid state");
+               nf_ct_l4proto_log_invalid(skb, ct, state, "invalid state");
                return -NF_ACCEPT;
        case TCP_CONNTRACK_TIME_WAIT:
                /* RFC5961 compliance cause stack to send "challenge-ACK"
@@ -1010,7 +1011,7 @@ int nf_conntrack_tcp_packet(struct nf_conn *ct,
                        /* Detected RFC5961 challenge ACK */
                        ct->proto.tcp.last_flags &= ~IP_CT_EXP_CHALLENGE_ACK;
                        spin_unlock_bh(&ct->lock);
-                       nf_ct_l4proto_log_invalid(skb, ct, "challenge-ack ignored");
+                       nf_ct_l4proto_log_invalid(skb, ct, state, "challenge-ack ignored");
                        return NF_ACCEPT; /* Don't change state */
                }
                break;
@@ -1035,7 +1036,7 @@ int nf_conntrack_tcp_packet(struct nf_conn *ct,
                        if (before(seq, ct->proto.tcp.seen[!dir].td_maxack)) {
                                /* Invalid RST  */
                                spin_unlock_bh(&ct->lock);
-                               nf_ct_l4proto_log_invalid(skb, ct, "invalid rst");
+                               nf_ct_l4proto_log_invalid(skb, ct, state, "invalid rst");
                                return -NF_ACCEPT;
                        }
 
@@ -1079,8 +1080,8 @@ int nf_conntrack_tcp_packet(struct nf_conn *ct,
                break;
        }
 
-       if (!tcp_in_window(ct, &ct->proto.tcp, dir, index,
-                          skb, dataoff, th)) {
+       if (!tcp_in_window(ct, dir, index,
+                          skb, dataoff, th, state)) {
                spin_unlock_bh(&ct->lock);
                return -NF_ACCEPT;
        }
index 68911fc..698fee4 100644 (file)
@@ -38,8 +38,7 @@ static void udp_error_log(const struct sk_buff *skb,
                          const struct nf_hook_state *state,
                          const char *msg)
 {
-       nf_l4proto_log_invalid(skb, state->net, state->pf,
-                              IPPROTO_UDP, "%s", msg);
+       nf_l4proto_log_invalid(skb, state, IPPROTO_UDP, "%s", msg);
 }
 
 static bool udp_error(struct sk_buff *skb,
@@ -130,8 +129,7 @@ static void udplite_error_log(const struct sk_buff *skb,
                              const struct nf_hook_state *state,
                              const char *msg)
 {
-       nf_l4proto_log_invalid(skb, state->net, state->pf,
-                              IPPROTO_UDPLITE, "%s", msg);
+       nf_l4proto_log_invalid(skb, state, IPPROTO_UDPLITE, "%s", msg);
 }
 
 static bool udplite_error(struct sk_buff *skb,
index 7780342..866cfba 100644 (file)
@@ -268,6 +268,7 @@ static struct nft_expr_type *nft_basic_types[] = {
        &nft_meta_type,
        &nft_rt_type,
        &nft_exthdr_type,
+       &nft_last_type,
 };
 
 static struct nft_object_type *nft_basic_objects[] = {
index 58fda6a..50b4e3c 100644 (file)
@@ -126,8 +126,10 @@ static int nfnl_hook_dump_one(struct sk_buff *nlskb,
 
 #ifdef CONFIG_KALLSYMS
        ret = snprintf(sym, sizeof(sym), "%ps", ops->hook);
-       if (ret < 0 || ret > (int)sizeof(sym))
+       if (ret >= sizeof(sym)) {
+               ret = -EINVAL;
                goto nla_put_failure;
+       }
 
        module_name = strstr(sym, " [");
        if (module_name) {
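
The nfnetlink_hook fix above relies on snprintf() returning the length the output would have had: any return greater than or equal to the buffer size means truncation, a case the old "ret > (int)sizeof(sym)" test missed by one. A standalone demonstration of that semantic:

	#include <stdio.h>

	int main(void)
	{
		char sym[8];
		int ret = snprintf(sym, sizeof(sym), "%s", "a_rather_long_name");

		/* ret is the untruncated length (18), not what was stored */
		if (ret >= (int)sizeof(sym))
			printf("truncated: wanted %d bytes, kept \"%s\"\n", ret, sym);
		return 0;
	}
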
index 7f705b5..4f583d2 100644 (file)
@@ -164,7 +164,7 @@ nft_tcp_header_pointer(const struct nft_pktinfo *pkt,
 {
        struct tcphdr *tcph;
 
-       if (!pkt->tprot_set || pkt->tprot != IPPROTO_TCP)
+       if (pkt->tprot != IPPROTO_TCP)
                return NULL;
 
        tcph = skb_header_pointer(pkt->skb, nft_thoff(pkt), sizeof(*tcph), buffer);
@@ -312,6 +312,9 @@ static void nft_exthdr_sctp_eval(const struct nft_expr *expr,
        const struct sctp_chunkhdr *sch;
        struct sctp_chunkhdr _sch;
 
+       if (pkt->tprot != IPPROTO_SCTP)
+               goto err;
+
        do {
                sch = skb_header_pointer(pkt->skb, offset, sizeof(_sch), &_sch);
                if (!sch || !sch->length)
@@ -334,7 +337,7 @@ static void nft_exthdr_sctp_eval(const struct nft_expr *expr,
                }
                offset += SCTP_PAD4(ntohs(sch->length));
        } while (offset < pkt->skb->len);
-
+err:
        if (priv->flags & NFT_EXTHDR_F_PRESENT)
                nft_reg_store8(dest, false);
        else
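
The nft_exthdr hunks add a fail-closed transport-protocol guard: without it, an SCTP chunk match could walk "chunks" over whatever header the packet actually carries at the transport offset. A toy illustration with invented values (not the kernel's types):

	#include <stdint.h>
	#include <stdio.h>

	#define IPPROTO_TCP  6
	#define IPPROTO_SCTP 132

	static void eval_sctp_chunk(uint8_t tprot, const uint8_t *th)
	{
		if (tprot != IPPROTO_SCTP) {
			/* the new 'goto err' path: report the chunk as absent */
			printf("not SCTP: chunk reported absent\n");
			return;
		}
		/* chunks start right after the 12-byte SCTP common header */
		printf("SCTP: first chunk type %u\n", th[12]);
	}

	int main(void)
	{
		uint8_t tcp_hdr[20] = { 0 };  /* bytes at the transport offset */

		eval_sctp_chunk(IPPROTO_TCP, tcp_hdr);
		return 0;
	}
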
diff --git a/net/netfilter/nft_last.c b/net/netfilter/nft_last.c
new file mode 100644 (file)
index 0000000..913ac45
--- /dev/null
@@ -0,0 +1,87 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables_core.h>
+#include <net/netfilter/nf_tables.h>
+
+struct nft_last_priv {
+       unsigned long   last_jiffies;
+       unsigned int    last_set;
+};
+
+static const struct nla_policy nft_last_policy[NFTA_LAST_MAX + 1] = {
+       [NFTA_LAST_SET] = { .type = NLA_U32 },
+       [NFTA_LAST_MSECS] = { .type = NLA_U64 },
+};
+
+static int nft_last_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
+                        const struct nlattr * const tb[])
+{
+       struct nft_last_priv *priv = nft_expr_priv(expr);
+       u64 last_jiffies;
+       int err;
+
+       if (tb[NFTA_LAST_MSECS]) {
+               err = nf_msecs_to_jiffies64(tb[NFTA_LAST_MSECS], &last_jiffies);
+               if (err < 0)
+                       return err;
+
+               priv->last_jiffies = jiffies + (unsigned long)last_jiffies;
+               priv->last_set = 1;
+       }
+
+       return 0;
+}
+
+static void nft_last_eval(const struct nft_expr *expr,
+                         struct nft_regs *regs, const struct nft_pktinfo *pkt)
+{
+       struct nft_last_priv *priv = nft_expr_priv(expr);
+
+       priv->last_jiffies = jiffies;
+       priv->last_set = 1;
+}
+
+static int nft_last_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+       struct nft_last_priv *priv = nft_expr_priv(expr);
+       __be64 msecs;
+
+       if (time_before(jiffies, priv->last_jiffies))
+               priv->last_set = 0;
+
+       if (priv->last_set)
+               msecs = nf_jiffies64_to_msecs(jiffies - priv->last_jiffies);
+       else
+               msecs = 0;
+
+       if (nla_put_be32(skb, NFTA_LAST_SET, htonl(priv->last_set)) ||
+           nla_put_be64(skb, NFTA_LAST_MSECS, msecs, NFTA_LAST_PAD))
+               goto nla_put_failure;
+
+       return 0;
+
+nla_put_failure:
+       return -1;
+}
+
+static const struct nft_expr_ops nft_last_ops = {
+       .type           = &nft_last_type,
+       .size           = NFT_EXPR_SIZE(sizeof(struct nft_last_priv)),
+       .eval           = nft_last_eval,
+       .init           = nft_last_init,
+       .dump           = nft_last_dump,
+};
+
+struct nft_expr_type nft_last_type __read_mostly = {
+       .name           = "last",
+       .ops            = &nft_last_ops,
+       .policy         = nft_last_policy,
+       .maxattr        = NFTA_LAST_MAX,
+       .flags          = NFT_EXPR_STATEFUL,
+       .owner          = THIS_MODULE,
+};
index e9c0afc..d9ac60f 100644 (file)
@@ -52,6 +52,8 @@ static void qdisc_maybe_clear_missed(struct Qdisc *q,
         */
        if (!netif_xmit_frozen_or_stopped(txq))
                set_bit(__QDISC_STATE_MISSED, &q->state);
+       else
+               set_bit(__QDISC_STATE_DRAINING, &q->state);
 }
 
 /* Main transmission queue. */
@@ -164,9 +166,13 @@ static inline void dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
 
                skb = next;
        }
-       if (lock)
+
+       if (lock) {
                spin_unlock(lock);
-       __netif_schedule(q);
+               set_bit(__QDISC_STATE_MISSED, &q->state);
+       } else {
+               __netif_schedule(q);
+       }
 }
 
 static void try_bulk_dequeue_skb(struct Qdisc *q,
@@ -409,7 +415,11 @@ void __qdisc_run(struct Qdisc *q)
        while (qdisc_restart(q, &packets)) {
                quota -= packets;
                if (quota <= 0) {
-                       __netif_schedule(q);
+                       if (q->flags & TCQ_F_NOLOCK)
+                               set_bit(__QDISC_STATE_MISSED, &q->state);
+                       else
+                               __netif_schedule(q);
+
                        break;
                }
        }
@@ -698,13 +708,14 @@ retry:
        if (likely(skb)) {
                qdisc_update_stats_at_dequeue(qdisc, skb);
        } else if (need_retry &&
-                  test_bit(__QDISC_STATE_MISSED, &qdisc->state)) {
+                  READ_ONCE(qdisc->state) & QDISC_STATE_NON_EMPTY) {
                /* Delay clearing the STATE_MISSED here to reduce
                 * the overhead of the second spin_trylock() in
                 * qdisc_run_begin() and __netif_schedule() calling
                 * in qdisc_run_end().
                 */
                clear_bit(__QDISC_STATE_MISSED, &qdisc->state);
+               clear_bit(__QDISC_STATE_DRAINING, &qdisc->state);
 
                /* Make sure dequeuing happens after clearing
                 * STATE_MISSED.
@@ -714,8 +725,6 @@ retry:
                need_retry = false;
 
                goto retry;
-       } else {
-               WRITE_ONCE(qdisc->empty, true);
        }
 
        return skb;
@@ -916,7 +925,6 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
        sch->enqueue = ops->enqueue;
        sch->dequeue = ops->dequeue;
        sch->dev_queue = dev_queue;
-       sch->empty = true;
        dev_hold(dev);
        refcount_set(&sch->refcnt, 1);
 
@@ -1222,6 +1230,7 @@ static void dev_reset_queue(struct net_device *dev,
        spin_unlock_bh(qdisc_lock(qdisc));
        if (nolock) {
                clear_bit(__QDISC_STATE_MISSED, &qdisc->state);
+               clear_bit(__QDISC_STATE_DRAINING, &qdisc->state);
                spin_unlock_bh(&qdisc->seqlock);
        }
 }
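
The sch_generic rework retires the per-qdisc empty flag in favour of two state bits: MISSED (an enqueue lost the trylock race) and the new DRAINING (the device queue is frozen with packets still pending). The retry test in dequeue_skb() then becomes a single masked read instead of two test_bit() calls. A standalone sketch of that combined test; the bit positions here are illustrative, not the kernel's:

	#include <stdio.h>

	#define __STATE_MISSED   0
	#define __STATE_DRAINING 1

	#define STATE_MISSED    (1UL << __STATE_MISSED)
	#define STATE_DRAINING  (1UL << __STATE_DRAINING)
	#define STATE_NON_EMPTY (STATE_MISSED | STATE_DRAINING)

	int main(void)
	{
		unsigned long state = STATE_DRAINING;

		/* one masked read answers "is there still work queued?" */
		if (state & STATE_NON_EMPTY)
			printf("retry dequeue: qdisc still has work\n");
		return 0;
	}
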
index d29b579..09a8f23 100644 (file)
@@ -1275,7 +1275,10 @@ enum sctp_disposition sctp_sf_backbeat_8_3(struct net *net,
                        return SCTP_DISPOSITION_DISCARD;
 
                sctp_transport_pl_recv(link);
-               return SCTP_DISPOSITION_CONSUME;
+               if (link->pl.state == SCTP_PL_COMPLETE)
+                       return SCTP_DISPOSITION_CONSUME;
+
+               return sctp_sf_send_probe(net, ep, asoc, type, link, commands);
        }
 
        max_interval = link->hbinterval + link->rto;
index f27b856..5f23804 100644 (file)
@@ -213,15 +213,10 @@ void sctp_transport_reset_reconf_timer(struct sctp_transport *transport)
 
 void sctp_transport_reset_probe_timer(struct sctp_transport *transport)
 {
-       int scale = 1;
-
        if (timer_pending(&transport->probe_timer))
                return;
-       if (transport->pl.state == SCTP_PL_COMPLETE &&
-           transport->pl.probe_count == 1)
-               scale = 30; /* works as PMTU_RAISE_TIMER */
        if (!mod_timer(&transport->probe_timer,
-                      jiffies + transport->probe_interval * scale))
+                      jiffies + transport->probe_interval))
                sctp_transport_hold(transport);
 }
 
@@ -333,13 +328,15 @@ void sctp_transport_pl_recv(struct sctp_transport *t)
                t->pl.probe_size += SCTP_PL_MIN_STEP;
                if (t->pl.probe_size >= t->pl.probe_high) {
                        t->pl.probe_high = 0;
+                       t->pl.raise_count = 0;
                        t->pl.state = SCTP_PL_COMPLETE; /* Search -> Search Complete */
 
                        t->pl.probe_size = t->pl.pmtu;
                        t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t);
                        sctp_assoc_sync_pmtu(t->asoc);
                }
-       } else if (t->pl.state == SCTP_PL_COMPLETE) {
+       } else if (t->pl.state == SCTP_PL_COMPLETE && ++t->pl.raise_count == 30) {
+               /* Raise probe_size again after 30 * interval in Search Complete */
                t->pl.state = SCTP_PL_SEARCH; /* Search Complete -> Search */
                t->pl.probe_size += SCTP_PL_MIN_STEP;
        }
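
The sctp transport hunks drop the one-off 30x timer scaling and count probe events instead: the probe timer always re-arms at the base interval, and only the 30th event observed in Search Complete flips the state back to Search to raise probe_size. A standalone model of that counter-based deferral, with the loop standing in for successive probe exchanges:

	#include <stdio.h>

	int main(void)
	{
		unsigned int raise_count = 0;
		int in_search = 0;             /* 0 = Search Complete, 1 = Search */

		for (int event = 1; event <= 60; event++) {
			if (!in_search && ++raise_count == 30) {
				in_search = 1;   /* Search Complete -> Search */
				raise_count = 0;
				printf("event %d: raise probe_size, re-enter Search\n",
				       event);
			}
		}
		return 0;
	}
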
index 075c4f4..289025c 100644 (file)
@@ -154,6 +154,9 @@ int smc_tx_sendmsg(struct smc_sock *smc, struct msghdr *msg, size_t len)
                goto out_err;
        }
 
+       if (sk->sk_state == SMC_INIT)
+               return -ENOTCONN;
+
        if (len > conn->sndbuf_desc->len)
                SMC_STAT_RMB_TX_SIZE_SMALL(smc, !conn->lnk);
 
@@ -164,8 +167,6 @@ int smc_tx_sendmsg(struct smc_sock *smc, struct msghdr *msg, size_t len)
                SMC_STAT_INC(smc, urg_data_cnt);
 
        while (msg_data_left(msg)) {
-               if (sk->sk_state == SMC_INIT)
-                       return -ENOTCONN;
                if (smc->sk.sk_shutdown & SEND_SHUTDOWN ||
                    (smc->sk.sk_err == ECONNABORTED) ||
                    conn->killed)
index e4cb0ff..e321fc6 100644 (file)
@@ -565,6 +565,42 @@ static int xfrm_output_gso(struct net *net, struct sock *sk, struct sk_buff *skb
        return 0;
 }
 
+/* For partial checksum offload, the outer header checksum is calculated
+ * by software and the inner header checksum is calculated by hardware.
+ * This requires hardware to know the inner packet type to calculate
+ * the inner header checksum. Save inner ip protocol here to avoid
+ * traversing the packet in the vendor's xmit code.
+ * If the encap type is IPIP, just save skb->inner_ipproto. Otherwise,
+ * get the ip protocol from the IP header.
+ */
+static void xfrm_get_inner_ipproto(struct sk_buff *skb)
+{
+       struct xfrm_offload *xo = xfrm_offload(skb);
+       const struct ethhdr *eth;
+
+       if (!xo)
+               return;
+
+       if (skb->inner_protocol_type == ENCAP_TYPE_IPPROTO) {
+               xo->inner_ipproto = skb->inner_ipproto;
+               return;
+       }
+
+       if (skb->inner_protocol_type != ENCAP_TYPE_ETHER)
+               return;
+
+       eth = (struct ethhdr *)skb_inner_mac_header(skb);
+
+       switch (ntohs(eth->h_proto)) {
+       case ETH_P_IPV6:
+               xo->inner_ipproto = inner_ipv6_hdr(skb)->nexthdr;
+               break;
+       case ETH_P_IP:
+               xo->inner_ipproto = inner_ip_hdr(skb)->protocol;
+               break;
+       }
+}
+
 int xfrm_output(struct sock *sk, struct sk_buff *skb)
 {
        struct net *net = dev_net(skb_dst(skb)->dev);
@@ -594,12 +630,15 @@ int xfrm_output(struct sock *sk, struct sk_buff *skb)
                        kfree_skb(skb);
                        return -ENOMEM;
                }
-               skb->encapsulation = 1;
 
                sp->olen++;
                sp->xvec[sp->len++] = x;
                xfrm_state_hold(x);
 
+               if (skb->encapsulation)
+                       xfrm_get_inner_ipproto(skb);
+               skb->encapsulation = 1;
+
                if (skb_is_gso(skb)) {
                        if (skb->inner_protocol)
                                return xfrm_output_gso(net, sk, skb);
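
The comment block above explains the producer side; the payoff is on the consumer side, where a driver's xmit path can decide whether hardware must compute an inner L4 checksum without re-parsing inner headers. A standalone model of that decision, with a simplified stand-in for struct xfrm_offload:

	#include <stdio.h>

	#define IPPROTO_TCP 6
	#define IPPROTO_UDP 17

	struct offload_ctx {
		unsigned char inner_ipproto;  /* cached by xfrm_get_inner_ipproto() */
	};

	static int needs_inner_l4_csum(const struct offload_ctx *xo)
	{
		switch (xo->inner_ipproto) {
		case IPPROTO_TCP:
		case IPPROTO_UDP:
			return 1;   /* hardware fills the inner L4 checksum */
		default:
			return 0;
		}
	}

	int main(void)
	{
		struct offload_ctx xo = { .inner_ipproto = IPPROTO_UDP };

		printf("inner L4 csum offload needed: %d\n",
		       needs_inner_l4_csum(&xo));
		return 0;
	}
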
index 61ae899..19deb9c 100644 (file)
@@ -30,3 +30,4 @@ hwtstamp_config
 rxtimestamp
 timestamping
 txtimestamp
+so_netns_cookie
index 3915bb7..79c9eb0 100644 (file)
@@ -30,7 +30,7 @@ TEST_GEN_FILES =  socket nettest
 TEST_GEN_FILES += psock_fanout psock_tpacket msg_zerocopy reuseport_addr_any
 TEST_GEN_FILES += tcp_mmap tcp_inq psock_snd txring_overwrite
 TEST_GEN_FILES += udpgso udpgso_bench_tx udpgso_bench_rx ip_defrag
-TEST_GEN_FILES += so_txtime ipv6_flowlabel ipv6_flowlabel_mgr
+TEST_GEN_FILES += so_txtime ipv6_flowlabel ipv6_flowlabel_mgr so_netns_cookie
 TEST_GEN_FILES += tcp_fastopen_backup_key
 TEST_GEN_FILES += fin_ack_lat
 TEST_GEN_FILES += reuseaddr_ports_exhausted
index 614d547..6f905b5 100644 (file)
@@ -1,4 +1,5 @@
 CONFIG_USER_NS=y
+CONFIG_NET_NS=y
 CONFIG_BPF_SYSCALL=y
 CONFIG_TEST_BPF=m
 CONFIG_NUMA=y
index bf361f3..c19ecc6 100755 (executable)
@@ -63,10 +63,14 @@ log_test()
        local rc=$1
        local expected=$2
        local msg="$3"
+       local xfail=$4
 
        if [ ${rc} -eq ${expected} ]; then
                printf "TEST: %-60s  [ OK ]\n" "${msg}"
                nsuccess=$((nsuccess+1))
+       elif [ ${rc} -eq ${xfail} ]; then
+               printf "TEST: %-60s  [XFAIL]\n" "${msg}"
+               nxfail=$((nxfail+1))
        else
                ret=1
                nfail=$((nfail+1))
@@ -322,7 +326,7 @@ check_exception()
                ip -netns h1 -6 ro get ${H1_VRF_ARG} ${H2_N2_IP6} | \
                grep -v "mtu" | grep -q "${R1_LLADDR}"
        fi
-       log_test $? 0 "IPv6: ${desc}"
+       log_test $? 0 "IPv6: ${desc}" 1
 }
 
 run_ping()
@@ -488,6 +492,7 @@ which ping6 > /dev/null 2>&1 && ping6=$(which ping6) || ping6=$(which ping)
 ret=0
 nsuccess=0
 nfail=0
+nxfail=0
 
 while getopts :pv o
 do
@@ -532,5 +537,6 @@ fi
 
 printf "\nTests passed: %3d\n" ${nsuccess}
 printf "Tests failed: %3d\n"   ${nfail}
+printf "Tests xfailed: %3d\n"  ${nxfail}
 
 exit $ret
diff --git a/tools/testing/selftests/net/so_netns_cookie.c b/tools/testing/selftests/net/so_netns_cookie.c
new file mode 100644 (file)
index 0000000..b39e87e
--- /dev/null
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0
+#define _GNU_SOURCE
+#include <sched.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <errno.h>
+#include <string.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+
+#ifndef SO_NETNS_COOKIE
+#define SO_NETNS_COOKIE 71
+#endif
+
+#define pr_err(fmt, ...) \
+       ({ \
+               fprintf(stderr, "%s:%d:" fmt ": %m\n", \
+                       __func__, __LINE__, ##__VA_ARGS__); \
+               1; \
+       })
+
+int main(int argc, char *argvp[])
+{
+       uint64_t cookie1, cookie2;
+       socklen_t vallen;
+       int sock1, sock2;
+
+       sock1 = socket(AF_INET, SOCK_STREAM, 0);
+       if (sock1 < 0)
+               return pr_err("Unable to create TCP socket");
+
+       vallen = sizeof(cookie1);
+       if (getsockopt(sock1, SOL_SOCKET, SO_NETNS_COOKIE, &cookie1, &vallen) != 0)
+               return pr_err("getsockopt(SOL_SOCKET, SO_NETNS_COOKIE)");
+
+       if (!cookie1)
+               return pr_err("SO_NETNS_COOKIE returned zero cookie");
+
+       if (unshare(CLONE_NEWNET))
+               return pr_err("unshare");
+
+       sock2 = socket(AF_INET, SOCK_STREAM, 0);
+       if (sock2 < 0)
+               return pr_err("Unable to create TCP socket");
+
+       vallen = sizeof(cookie2);
+       if (getsockopt(sock2, SOL_SOCKET, SO_NETNS_COOKIE, &cookie2, &vallen) != 0)
+               return pr_err("getsockopt(SOL_SOCKET, SO_NETNS_COOKIE)");
+
+       if (!cookie2)
+               return pr_err("SO_NETNS_COOKIE returned zero cookie");
+
+       if (cookie1 == cookie2)
+               return pr_err("SO_NETNS_COOKIE returned identical cookies for distinct ns");
+
+       close(sock1);
+       close(sock2);
+       return 0;
+}